summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGary King <gking@nvidia.com>2009-12-11 14:12:23 -0800
committerGary King <gking@nvidia.com>2009-12-11 14:13:37 -0800
commitfb0d4cc020403e874193ab0b6ef463414e4957c1 (patch)
tree9cd2a41b458e9fcaf6838d32fe9fdc051cff9e7e
parentca2b56d4a0d2c83b722819416dbd4387bac3c0b2 (diff)
tegra-rm: branch over Tegra and Tegra 2 RM from Perforce CL 5140548
Update all license grants to BSD/MIT-like
Remove some deadcode from the kernel fork of the codebase

Change-Id: I73c130a8094972497c7dc7dba65755bd16175819
-rw-r--r--arch/arm/mach-tegra/Kconfig6
-rw-r--r--arch/arm/mach-tegra/Makefile3
-rw-r--r--arch/arm/mach-tegra/include/nvrm_analog.h217
-rw-r--r--arch/arm/mach-tegra/include/nvrm_arm_cp.h189
-rw-r--r--arch/arm/mach-tegra/include/nvrm_boot.h58
-rw-r--r--arch/arm/mach-tegra/include/nvrm_diag.h552
-rw-r--r--arch/arm/mach-tegra/include/nvrm_dma.h384
-rw-r--r--arch/arm/mach-tegra/include/nvrm_drf.h156
-rw-r--r--arch/arm/mach-tegra/include/nvrm_gpio.h389
-rw-r--r--arch/arm/mach-tegra/include/nvrm_hardware_access.h151
-rw-r--r--arch/arm/mach-tegra/include/nvrm_i2c.h216
-rw-r--r--arch/arm/mach-tegra/include/nvrm_init.h142
-rw-r--r--arch/arm/mach-tegra/include/nvrm_interrupt.h271
-rw-r--r--arch/arm/mach-tegra/include/nvrm_keylist.h96
-rw-r--r--arch/arm/mach-tegra/include/nvrm_memctrl.h189
-rw-r--r--arch/arm/mach-tegra/include/nvrm_memmgr.h1013
-rw-r--r--arch/arm/mach-tegra/include/nvrm_minikernel.h57
-rw-r--r--arch/arm/mach-tegra/include/nvrm_module.h732
-rw-r--r--arch/arm/mach-tegra/include/nvrm_owr.h161
-rw-r--r--arch/arm/mach-tegra/include/nvrm_pcie.h147
-rw-r--r--arch/arm/mach-tegra/include/nvrm_pinmux.h222
-rw-r--r--arch/arm/mach-tegra/include/nvrm_pmu.h420
-rw-r--r--arch/arm/mach-tegra/include/nvrm_power.h1326
-rw-r--r--arch/arm/mach-tegra/include/nvrm_pwm.h180
-rw-r--r--arch/arm/mach-tegra/include/nvrm_rmctrace.h147
-rw-r--r--arch/arm/mach-tegra/include/nvrm_spi.h370
-rw-r--r--arch/arm/mach-tegra/nvrm/Makefile18
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/Makefile30
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_config.c2689
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_misc.c511
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.c924
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.h446
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks_info.c1673
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_fuse.c104
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c257
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_hwmap.c51
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c718
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c523
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt.c314
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt_generic.c96
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_memctrl.c564
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pinmux_tables.c1166
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pmc_scratch_map.h73
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power.c568
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.c544
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.h314
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_oalintf.c330
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h364
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_reloctable.c50
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_pinmux_tables.c325
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_reloctable.c50
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/nvrm_clocks.c3215
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap15/nvrm_diag.c1376
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/Makefile18
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c1546
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.c1308
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.h278
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks_info.c1827
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_fuse.c78
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c257
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_memctrl.c164
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_misc_private.h63
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_pinmux_tables.c1213
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.c371
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.h347
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_reloctable.c50
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/Makefile27
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/chiplib_interface.h182
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_chipid.h101
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.c846
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.h94
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_clockids.h86
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks.h1356
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits.c939
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_private.h308
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_stub.c44
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.c146
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.h105
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_heap.h219
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c188
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c128
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c555
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h85
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_hw_devids.h447
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.c200
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.h42
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_ioctls.h74
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_keylist.c197
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c1212
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr_private.h88
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_module.c279
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_module_common.c233
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_module_private.h79
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_moduleids.h51
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux.c1015
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux_utils.h376
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu.c608
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu_private.h146
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c1531
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c3523
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.h560
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power_private.h549
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_priv_ap_general.h60
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_processor.h145
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.c679
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.h266
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_rmctrace.c49
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_structure.h152
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/Makefile28
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/NvRm_Dispatch.c151
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_analog_dispatch.c260
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_diag_dispatch.c1078
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_dma_dispatch.c372
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_gpio_dispatch.c566
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_i2c_dispatch.c240
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_init_dispatch.c223
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_interrupt_dispatch.c144
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_keylist_dispatch.c144
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_memctrl_dispatch.c383
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_memmgr_dispatch.c941
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_module_dispatch.c974
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_owr_dispatch.c237
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_pcie_dispatch.c334
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_pinmux_dispatch.c301
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_pmu_dispatch.c617
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_power_dispatch.c1916
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_pwm_dispatch.c187
-rw-r--r--arch/arm/mach-tegra/nvrm/dispatch/nvrm_spi_dispatch.c407
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/Makefile24
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_analog.c2175
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_hw_private.c336
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_intr.c94
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.c338
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.h64
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_i2c.c742
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c505
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm_private.h91
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_slink_hw_private.c300
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/nvrm_dma.c1926
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio.c590
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.c186
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.h129
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_stub_helper.c197
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_common_slink_hw_private.c467
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.c566
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.h271
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_hw_private.c612
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.c2932
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.h66
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink_hw_private.h391
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/Makefile13
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_i2c.c1486
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_owr.c853
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie.c2122
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie_private.h245
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_slink_hw_private.c332
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/Makefile12
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/nvrm_gpioi2c.c463
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c.c652
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c_private.h305
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/nvrm_owr.c387
-rw-r--r--arch/arm/mach-tegra/nvrm/io/common/nvrm_owr_private.h244
162 files changed, 80196 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 35d9ddb45360..40543237c953 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -42,4 +42,10 @@ config TEGRA_SYSTEM_DMA
Adds system DMA functionality for NVIDIA Tegra SoCs, used by
several Tegra device drivers
+config MACH_TEGRA_GENERIC_DEBUG
+ bool "Enable debug logging for Tegra generic drivers"
+ depends on MACH_TEGRA_GENERIC
+ help
+ Enables debug logging for the generic Tegra NvRm and NvOs drivers
+
endif
\ No newline at end of file
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index d1dbf71f5367..7abcf1d37a9d 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -7,8 +7,11 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
# SMP support
obj-$(CONFIG_SMP) += headsmp.o
+<<<<<<< HEAD:arch/arm/mach-tegra/Makefile
# System DMA
obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
# NvOs / NvRm-based kernel implementation
obj-$(CONFIG_MACH_TEGRA_GENERIC) += nvddk/
+# NvOs / NvRm-based kernel implementation
+obj-$(CONFIG_MACH_TEGRA_GENERIC) += nvrm/
diff --git a/arch/arm/mach-tegra/include/nvrm_analog.h b/arch/arm/mach-tegra/include/nvrm_analog.h
new file mode 100644
index 000000000000..fd53d1e62c9d
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_analog.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_analog_H
+#define INCLUDED_nvrm_analog_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvodm_query.h"
+
+/**
+ * List of controllable analog interfaces. Multiple instances of any
+ * particular interface will be handled by the NVRM_ANALOG_INTERFACE macro
+ * below.
+ */
+
+typedef enum
+{
+ NvRmAnalogInterface_Dsi,
+ NvRmAnalogInterface_ExternalMemory,
+ NvRmAnalogInterface_Hdmi,
+ NvRmAnalogInterface_Lcd,
+ NvRmAnalogInterface_Uart,
+ NvRmAnalogInterface_Usb,
+ NvRmAnalogInterface_Sdio,
+ NvRmAnalogInterface_Tv,
+ NvRmAnalogInterface_VideoInput,
+ NvRmAnalogInterface_Num,
+ NvRmAnalogInterface_Force32 = 0x7FFFFFFF
+} NvRmAnalogInterface;
+
+/**
+ * Defines the USB Line state
+ */
+
+typedef enum
+{
+ NvRmUsbLineStateType_SE0 = 0,
+ NvRmUsbLineStateType_SJ = 1,
+ NvRmUsbLineStateType_SK = 2,
+ NvRmUsbLineStateType_SE1 = 3,
+ NvRmUsbLineStateType_Num,
+ NvRmUsbLineStateType_Force32 = 0x7FFFFFFF
+} NvRmUsbLineStateType;
+
+/**
+ * List of analog TV DAC type
+ */
+
+typedef enum
+{
+ NvRmAnalogTvDacType_CRT,
+ NvRmAnalogTvDacType_SDTV,
+ NvRmAnalogTvDacType_HDTV,
+ NvRmAnalogTvDacType_Num,
+ NvRmAnalogTvDacType_Force32 = 0x7FFFFFFF
+} NvRmAnalogTvDacType;
+
+/**
+ * Create an analog interface id with multiple instances.
+ */
+#define NVRM_ANALOG_INTERFACE( id, instance ) \
+ ((NvRmAnalogInterface)( (instance) << 16 | id ))
+
+/**
+ * Get the interface id.
+ */
+#define NVRM_ANALOG_INTERFACE_ID( id ) ((id) & 0xFFFF)
+
+/**
+ * Get the interface instance.
+ */
+#define NVRM_ANALOG_INTERFACE_INSTANCE( id ) (((id) >> 16) & 0xFFFF)
+
+/**
+ * Control I/O pads, DACs, or PHYs, either enable or disable, with an optional
+ * configuration structure, which may be defined per module.
+ *
+ * @param hDevice Handle to the RM device
+ * @param Interface The physical interface to configure
+ * @param Enable enable/disable bit
+ * @param Config extra configuration options for each module, if necessary
+ * @param ConfigLength the size in bytes of the configuration structure
+ */
+
+ NvError NvRmAnalogInterfaceControl(
+ NvRmDeviceHandle hDevice,
+ NvRmAnalogInterface Interface,
+ NvBool Enable,
+ void* Config,
+ NvU32 ConfigLength );
+
+/**
+ * Get TV DAC Configuration
+ *
+ * @param hDevice Handle to the RM device
+ * @param Type The analog TV DAC type
+ * @return The analog TV DAC Configuration value
+ */
+
+ NvU8 NvRmAnalogGetTvDacConfiguration(
+ NvRmDeviceHandle hDevice,
+ NvRmAnalogTvDacType Type );
+
+/**
+ * Detect if USB is connected or not
+ *
+ * @param hDevice Handle to the RM device
+ * @return TRUE means USB is connected
+ */
+
+ NvBool NvRmUsbIsConnected(
+ NvRmDeviceHandle hDevice );
+
+/**
+ * Detect charger type
+ *
+ * @param hDevice Handle to the RM device
+ * @param wait Delay time and ready to get the correct charger type
+ * @return USB charger type
+ */
+
+ NvU32 NvRmUsbDetectChargerState(
+ NvRmDeviceHandle hDevice,
+ NvU32 wait );
+
+/**
+ * Extended configuration structures for NvRmAnalogInterfaceControl.
+ */
+
+typedef struct NvRmAnalogTvDacConfigRec
+{
+
+ /* The DAC input source, may be a Display controller or the TVO engine */
+ NvRmModuleID Source;
+
+ /* The DAC output amplitude */
+ NvU8 DacAmplitude;
+} NvRmAnalogTvDacConfig;
+
+/**
+ * List of USB analog status check parameters
+ */
+
+typedef enum
+{
+ NvRmAnalogUsbInputParam_CheckCableStatus,
+ NvRmAnalogUsbInputParam_CheckChargerStatus,
+ NvRmAnalogUsbInputParam_CheckIdStatus,
+ NvRmAnalogUsbInputParam_WaitForPhyClock,
+ NvRmAnalogUsbInputParam_ConfigureUsbPhy,
+ NvRmAnalogUsbInputParam_ChargerDetection,
+ NvRmAnalogUsbInputParam_SetUlpiNullTrimmers,
+ NvRmAnalogUsbInputParam_ConfigureUlpiNullClock,
+ NvRmAnalogUsbInputParam_SetNullUlpiPinMux,
+ NvRmAnalogUsbInputParam_SetUlpiLinkTrimmers,
+ NvRmAnalogUsbInputParam_VbusInterrupt,
+ NvRmAnalogUsbInputParam_IdInterrupt,
+ NvRmAnalogUsbInputParam_Num,
+ NvRmAnalogUsbInputParam_Force32 = 0x7FFFFFFF
+} NvRmAnalogUsbInputParam;
+
+/**
+ * Extended configuration structures for NvRmAnalogInterfaceControl for USB.
+ */
+
+typedef struct NvRmAnalogUsbConfigRec
+{
+
+ /* The USB Status check parameter */
+ NvRmAnalogUsbInputParam InParam;
+ NvBool UsbCableDetected;
+ NvBool UsbChargerDetected;
+ NvBool UsbIdDetected;
+} NvRmAnalogUsbConfig;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_arm_cp.h b/arch/arm/mach-tegra/include/nvrm_arm_cp.h
new file mode 100644
index 000000000000..30bdd971f4cf
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_arm_cp.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#ifndef INCLUDED_ARM_CP_H
+#define INCLUDED_ARM_CP_H
+
+#include "nvassert.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//==========================================================================
+// Compiler-specific status and coprocessor register abstraction macros.
+//==========================================================================
+
+#if defined(_MSC_VER) && NVOS_IS_WINDOWS_CE // Microsoft compiler on WinCE
+
+ // Define the standard ARM coprocessor register names because the ARM compiler requires
+ // that we use the names and the Microsoft compiler requires that we use the numbers for
+ // its intrinsic functions _MoveToCoprocessor() and _MoveFromCoprocessor().
+ #define p14 14
+ #define p15 15
+ #define c0 0
+ #define c1 1
+ #define c2 2
+ #define c3 3
+ #define c4 4
+ #define c5 5
+ #define c6 6
+ #define c7 7
+ #define c8 8
+ #define c9 9
+ #define c10 10
+ #define c11 11
+ #define c12 12
+ #define c13 13
+ #define c14 14
+ #define c15 15
+
+ /*
+ * @brief Macro to abstract writing of an ARM coprocessor register via the MCR instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable containing the value to be written to the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7).
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MCR(cp,op1,Rd,CRn,CRm,op2) _MoveToCoprocessor((NvU32)(Rd), cp, op1, CRn, CRm, op2)
+
+ /*
+ * @brief Macro to abstract reading of an ARM coprocessor register via the MRC instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable that will receive the value read from the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7).
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MRC(cp,op1,Rd,CRn,CRm,op2) *((NvU32*)(&(Rd))) = _MoveFromCoprocessor(cp, op1, CRn, CRm, op2)
+
+#elif defined(__ARMCC_VERSION) // ARM compiler
+
+ /*
+ * @brief Macro to abstract writing of an ARM coprocessor register via the MCR instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable that will be written to the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7)
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MCR(cp,op1,Rd,CRn,CRm,op2) __asm { MCR cp, op1, Rd, CRn, CRm, op2 }
+
+ /*
+ * @brief Macro to abstract reading of an ARM coprocessor register via the MRC instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable that will receive the value read from the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7).
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MRC(cp,op1,Rd,CRn,CRm,op2) __asm { MRC cp, op1, Rd, CRn, CRm, op2 }
+
+#elif NVOS_IS_LINUX || __GNUC__ // linux compilers
+
+ #if defined(__arm__) // ARM GNU compiler
+
+ // Define the standard ARM coprocessor register names because the ARM compiler requires
+ // that we use the names and the GNU compiler requires that we use the numbers.
+ #define p14 14
+ #define p15 15
+ #define c0 0
+ #define c1 1
+ #define c2 2
+ #define c3 3
+ #define c4 4
+ #define c5 5
+ #define c6 6
+ #define c7 7
+ #define c8 8
+ #define c9 9
+ #define c10 10
+ #define c11 11
+ #define c12 12
+ #define c13 13
+ #define c14 14
+ #define c15 15
+
+ /*
+ * @brief Macro to abstract writing of an ARM coprocessor register via the MCR instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable containing the value to be written to the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7).
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MCR(cp,op1,Rd,CRn,CRm,op2) asm(" MCR " #cp",%1,%2,"#CRn","#CRm ",%5" \
+ : : "i" (cp), "i" (op1), "r" (Rd), "i" (CRn), "i" (CRm), "i" (op2))
+
+ /*
+ * @brief Macro to abstract reading of an ARM coprocessor register via the MRC instruction.
+ * @param cp is the coprocessor name (e.g., p15)
+ * @param op1 is a coprocessor-specific operation code (must be a manifest constant).
+ * @param Rd is a variable that will receive the value read from the coprocessor register.
+ * @param CRn is the destination coprocessor register (e.g., c7).
+ * @param CRm is an additional destination coprocessor register (e.g., c2).
+ * @param op2 is a coprocessor-specific operation code (must be a manifest constant).
+ */
+ #define MRC(cp,op1,Rd,CRn,CRm,op2) asm( " MRC " #cp",%2,%0," #CRn","#CRm",%5" \
+ : "=r" (Rd) : "i" (cp), "i" (op1), "i" (CRn), "i" (CRm), "i" (op2))
+
+ #else
+
+ /* x86 processor. No such instructions. Callers should not call these macros
+ * when running on x86. If they do, it will compile but will not work. */
+ #define MCR(cp,op1,Rd,CRn,CRm,op2) do { Rd = Rd; NV_ASSERT(0); } while (0)
+ #define MRC(cp,op1,Rd,CRn,CRm,op2) do { Rd = 0; /*NV_ASSERT(0);*/ } while (0)
+
+ #endif
+#else
+
+ // !!!FIXME!!! TEST FOR ALL KNOWN COMPILERS -- FOR NOW JUST DIE AT RUN-TIME
+ // #error "Unknown compiler"
+ #define MCR(cp,op1,Rd,CRn,CRm,op2) do { Rd = Rd; NV_ASSERT(0); } while (0)
+ #define MRC(cp,op1,Rd,CRn,CRm,op2) do { Rd = 0; /*NV_ASSERT(0);*/ } while (0)
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INCLUDED_ARM_CP_H
+
diff --git a/arch/arm/mach-tegra/include/nvrm_boot.h b/arch/arm/mach-tegra/include/nvrm_boot.h
new file mode 100644
index 000000000000..75258d068ec9
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_boot.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_BOOT_H
+#define INCLUDED_NVRM_BOOT_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+/**
+ * Sets the RM chip shmoo data as a boot argument from the system's
+ * boot loader.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @retval NvSuccess If successful, or the appropriate error code.
+ */
+NvError NvRmBootArgChipShmooSet(NvRmDeviceHandle hRmDevice);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // INCLUDED_NVRM_BOOT_H
diff --git a/arch/arm/mach-tegra/include/nvrm_diag.h b/arch/arm/mach-tegra/include/nvrm_diag.h
new file mode 100644
index 000000000000..4ec86ed55f7e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_diag.h
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_diag_H
+#define INCLUDED_nvrm_diag_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+#include "nvcommon.h"
+
+/**
+ * All of the hardware modules. Multiple instances are handled by the
+ * NVRM_DIAG_MODULE macro.
+ */
+
+typedef enum
+{
+ NvRmDiagModuleID_Cache = 1,
+ NvRmDiagModuleID_Vcp,
+ NvRmDiagModuleID_Host1x,
+ NvRmDiagModuleID_Display,
+ NvRmDiagModuleID_Ide,
+ NvRmDiagModuleID_3d,
+ NvRmDiagModuleID_Isp,
+ NvRmDiagModuleID_Usb,
+ NvRmDiagModuleID_2d,
+ NvRmDiagModuleID_Vi,
+ NvRmDiagModuleID_Epp,
+ NvRmDiagModuleID_I2s,
+ NvRmDiagModuleID_Pwm,
+ NvRmDiagModuleID_Twc,
+ NvRmDiagModuleID_Hsmmc,
+ NvRmDiagModuleID_Sdio,
+ NvRmDiagModuleID_NandFlash,
+ NvRmDiagModuleID_I2c,
+ NvRmDiagModuleID_Spdif,
+ NvRmDiagModuleID_Gpio,
+ NvRmDiagModuleID_Uart,
+ NvRmDiagModuleID_Timer,
+ NvRmDiagModuleID_Rtc,
+ NvRmDiagModuleID_Ac97,
+ NvRmDiagModuleID_Coprocessor,
+ NvRmDiagModuleID_Cpu,
+ NvRmDiagModuleID_Bsev,
+ NvRmDiagModuleID_Bsea,
+ NvRmDiagModuleID_Vde,
+ NvRmDiagModuleID_Mpe,
+ NvRmDiagModuleID_Emc,
+ NvRmDiagModuleID_Sprom,
+ NvRmDiagModuleID_Tvdac,
+ NvRmDiagModuleID_Csi,
+ NvRmDiagModuleID_Hdmi,
+ NvRmDiagModuleID_MipiBaseband,
+ NvRmDiagModuleID_Tvo,
+ NvRmDiagModuleID_Dsi,
+ NvRmDiagModuleID_Dvc,
+ NvRmDiagModuleID_Sbc,
+ NvRmDiagModuleID_Xio,
+ NvRmDiagModuleID_Spi,
+ NvRmDiagModuleID_NorFlash,
+ NvRmDiagModuleID_Slc,
+ NvRmDiagModuleID_Fuse,
+ NvRmDiagModuleID_Pmc,
+ NvRmDiagModuleID_StatMon,
+ NvRmDiagModuleID_Kbc,
+ NvRmDiagModuleID_Vg,
+ NvRmDiagModuleID_ApbDma,
+ NvRmDiagModuleID_Mc,
+ NvRmDiagModuleID_SpdifIn,
+ NvRmDiagModuleID_Vfir,
+ NvRmDiagModuleID_Cve,
+ NvRmDiagModuleID_ViSensor,
+ NvRmDiagModuleID_SystemReset,
+ NvRmDiagModuleID_AvpUcq,
+ NvRmDiagModuleID_KFuse,
+ NvRmDiagModuleID_OneWire,
+ NvRmDiagModuleID_SyncNor,
+ NvRmDiagModuleID_Pcie,
+ NvRmDiagModuleID_Num,
+ NvRmDiagModuleID_Force32 = 0x7FFFFFFF
+} NvRmDiagModuleID;
+
+/**
+ * Create a diag module id with multiple instances.
+ */
+#define NVRM_DIAG_MODULE( id, instance ) \
+ ((NvRmDiagModuleID)( (instance) << 16 | id ))
+
+/**
+ * Get the module id.
+ */
+#define NVRM_DIAG_MODULE_ID( id ) ((id) & 0xFFFF)
+
+/**
+ * Get the module instance.
+ */
+#define NVRM_DIAG_MODULE_INSTANCE( id ) (((id) >> 16) & 0xFFFF)
+
+/**
+ * Enable/disable support for individual clock diagnostic lock
+ */
+#define NVRM_DIAG_LOCK_SUPPORTED (0)
+
+/**
+ * Append clock configuration flags with diagnostic lock flag
+ */
+#define NvRmClockConfig_DiagLock ((NvRmClockConfigFlags_Num & (~0x01)) << 1)
+
+/**
+ * Defines clock source types
+ */
+
+typedef enum
+{
+
+ /// Clock source with fixed frequency
+ NvRmDiagClockSourceType_Oscillator = 1,
+
+ /// PLL clock source
+ NvRmDiagClockSourceType_Pll,
+
+ /// Clock scaler derives its clock from oscillators, PLLs or other scalers
+ NvRmDiagClockSourceType_Scaler,
+ NvRmDiagClockSourceType_Num,
+ NvRmDiagClockSourceType_Force32 = 0x7FFFFFFF
+} NvRmDiagClockSourceType;
+
+/**
+ * Defines types of clock scalers. Scale coefficient for all clock scalers
+ * is specified as (m, n) pair of 32-bit values. The interpretation of the
+ * m, n values for each type is clarified below.
+ */
+
+typedef enum
+{
+
+ /// No clock scaler: m = n = 1 always
+ NvRmDiagClockScalerType_NoScaler = 1,
+
+ /// Clock divider with m = 1 always, and n = 31.1 format
+ /// with half-step lowest bit
+ NvRmDiagClockScalerType_Divider_1_N,
+
+ /// Clock divider with rational (m+1)/(n+1) coefficient; m and n are
+ /// integers, scale 1:1 is applied if m >= n
+ NvRmDiagClockScalerType_Divider_M_N,
+
+ /// Clock divider with rational (m+1)/16 coefficient, i.e., n = 16 always;
+ /// m is integer, scale 1:1 is applied if m >= 15 ("keeps" m + 1 clocks
+ /// out of every 16)
+ NvRmDiagClockScalerType_Divider_M_16,
+
+ /// Clock doubler: scale 2:1 if m != 0, scale 1:1 if m = 0,
+ /// n = 1 always
+ NvRmDiagClockScalerType_Doubler,
+ NvRmDiagClockScalerType_Num,
+ NvRmDiagClockScalerType_Force32 = 0x7FFFFFFF
+} NvRmDiagClockScalerType;
+
+/**
+ * Defines RM thermal monitoring zones.
+ */
+
+typedef enum
+{
+
+ /// Specifies ambient temperature zone.
+ NvRmTmonZoneId_Ambient = 1,
+
+ /// Specifies SoC core temperature zone.
+ NvRmTmonZoneId_Core,
+ NvRmTmonZoneId_Num,
+ NvRmTmonZoneId_Force32 = 0x7FFFFFFF
+} NvRmTmonZoneId;
+
+/// Clock source opaque handle (TODO: replace forward idl declaration
+/// of <enum> with forward declaration of <handle>, when it is supported
+typedef struct NvRmClockSourceInfoRec* NvRmDiagClockSourceHandle;
+
+/// Power rail opaque handle
+
+typedef struct NvRmDiagPowerRailRec *NvRmDiagPowerRailHandle;
+
+/**
+ * Enables diagnostic mode (disable is not allowed). Clock, voltage, etc.,
+ * will no longer be controlled by the Resource Manager. The NvRmDiag
+ * interfaces should be used instead.
+ *
+ * @param hDevice The RM device handle.
+ *
+ * @retval NvSuccess if diagnostic mode is successfully enabled.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * diagnostic mode.
+ */
+
+ NvError NvRmDiagEnable(
+ NvRmDeviceHandle hDevice );
+
+/**
+ * Lists modules present in the chip and available for diagnostic.
+ *
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of Ids returned. If
+ * entry size is 0, maximum list size is returned.
+ * @param pIdList Pointer to the list of combined module Id/Instance values
+ * to be filled in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the module list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagListModules(
+ NvU32 * pListSize,
+ NvRmDiagModuleID * pIdList );
+
+/**
+ * Lists available SoC clock sources.
+ *
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of source handles
+ * returned. If entry size is 0, maximum list size is returned.
+ * @param phSourceList Pointer to the list of source handles to be filled
+ * in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the source list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagListClockSources(
+ NvU32 * pListSize,
+ NvRmDiagClockSourceHandle * phSourceList );
+
+/**
+ * Lists clock sources for the specified module.
+ *
+ * @param id Combined Id and instance for the target module.
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of source handles
+ * returned. If entry size is 0, maximum list size is returned.
+ * @param phSourceList Pointer to the list of source handles to be filled
+ * in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the source list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagModuleListClockSources(
+ NvRmDiagModuleID id,
+ NvU32 * pListSize,
+ NvRmDiagClockSourceHandle * phSourceList );
+
+/**
+ * Enables/Disables specified module clock.
+ *
+ * @param id Combined Id and instance for the target module.
+ * @param enable Requested clock state - enabled if true, disabled if false
+ *
+ * @retval NvSuccess if clock state changed successfully.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagModuleClockEnable(
+ NvRmDiagModuleID id,
+ NvBool enable );
+
+/**
+ * Configures the clock for the specified module.
+ *
+ * @param id Combined Id and instance for the target module.
+ * @param hSource The handle of the clock source to drive the given module.
+ * @param divider 31.1 format: lowest bit is half-step. No range checking.
+ * Half-step bit is ignored if module divider is not fractional. High
+ * bits are silently truncated if the value is out of h/w field range.
+ * @param Source1st If true, the clock source is updated first, and the divider
+ * is modified after the chip-specific delay. If false, the order of update
+ * is reversed.
+ *
+ * @retval NvSuccess if clock state changed successfully.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagModuleClockConfigure(
+ NvRmDiagModuleID id,
+ NvRmDiagClockSourceHandle hSource,
+ NvU32 divider,
+ NvBool Source1st );
+
+/**
+ * Gets the name of the given clock source.
+ *
+ * @param hSource The target clock source handle.
+ *
+ * @return The 64-bit packed 8-character name of the given clock source. Zero
+ * will be returned if diagnostic mode is not enabled or the source is invalid.
+ */
+
+ NvU64 NvRmDiagClockSourceGetName(
+ NvRmDiagClockSourceHandle hSource );
+
+/**
+ * Gets the type of the given clock source.
+ *
+ * @param hSource The target clock source handle.
+ *
+ * @return The type of the given clock source. Zero will be returned if
+ * diagnostic mode is not enabled or the source is invalid.
+ */
+
+ NvRmDiagClockSourceType NvRmDiagClockSourceGetType(
+ NvRmDiagClockSourceHandle hSource );
+
+/**
+ * Gets the type of the scaler for the given clock source.
+ *
+ * @param hSource The target clock source handle.
+ *
+ * @return The type of the scaler for the given clock source. Zero will be
+ * returned if diagnostic mode is not enabled or the source is invalid.
+ */
+
+ NvRmDiagClockScalerType NvRmDiagClockSourceGetScaler(
+ NvRmDiagClockSourceHandle hSource );
+
+/**
+ * Lists input clock sources for the specified clock source.
+ * Primary oscillators have no input sources, and always return 0 as
+ * list size. Other sources (secondary sources with fixed frequency,
+ * PLLs and scalers) have one or more input sources.
+ *
+ * @param hSource The target clock source handle.
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of source handles
+ * returned. If entry size is 0, maximum list size is returned.
+ * @param phSourceList Pointer to the list of source handles to be filled
+ * in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the source list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagClockSourceListSources(
+ NvRmDiagClockSourceHandle hSource,
+ NvU32 * pListSize,
+ NvRmDiagClockSourceHandle * phSourceList );
+
+/**
+ * Gets the given oscillator frequency in kHz.
+ *
+ * @param hOscillator The targeted oscillator/fixed frequency source handle.
+ *
+ * @return The oscillator frequency in kHz. Zero will be returned if
+ * diagnostic mode is not enabled or the target source is invalid.
+ */
+
+ NvU32 NvRmDiagOscillatorGetFreq(
+ NvRmDiagClockSourceHandle hOscillator );
+
+/**
+ * Configures given PLL. Switches PLL in bypass mode, changes PLL settings,
+ * waits for PLL stabilization, and switches back to PLL output.
+ *
+ * @param hPll The targeted PLL handle.
+ * @param M Input divider settings (32-bit integer value)
+ * @param N Feedback divider settings (32-bit integer value)
+ * @param P Post divider settings (32-bit integer value)
+ * If either M or N is zero, the PLL is left disabled and bypassed. Besides
+ * that, no other M, N, P parameter validation is performed. High bits are
+ * silently truncated if the value is out of the h/w field range.
+ *
+ * @retval NvSuccess if clock state changed successfully.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagPllConfigure(
+ NvRmDiagClockSourceHandle hPll,
+ NvU32 M,
+ NvU32 N,
+ NvU32 P );
+
+/**
+ * Configures specified clock scaler.
+ *
+ * @param hScaler The targeted Clock Scaler handle.
+ * @param hInput The handle of the input clock source to drive the
+ * targeted scaler.
+ * @param M The dividend in the scaler coefficient (M/N) - 31.1 format:
+ * lowest bit is half-step.
+ * @param N The divisor in the scaler coefficient (M/N) - 31.1 format:
+ * lowest bit is half-step.
+ * No range checking for M, N parameters. Half-step bit is ignored if
+ * the scaler is not fractional. High bits are silently truncated if
+ * the value is out of h/w field range.
+ *
+ * @retval NvSuccess if clock state changed successfully.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagClockScalerConfigure(
+ NvRmDiagClockSourceHandle hScaler,
+ NvRmDiagClockSourceHandle hInput,
+ NvU32 M,
+ NvU32 N );
+
+/**
+ * Resets module.
+ *
+ * @param id Combined Id and instance for the target module.
+ * @param KeepAsserted If true, reset will be kept asserted on exit.
+ * If false, reset is kept asserted for chip specific delay, and
+ * de-asserted on exit.
+ *
+ * @retval NvSuccess if module reset completed successfully.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagModuleReset(
+ NvRmDiagModuleID id,
+ NvBool KeepAsserted );
+
+/**
+ * Lists power rails.
+ *
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of rail handles
+ * returned. If entry size is 0, maximum list size is returned.
+ * @param phRailList Pointer to the list of power rail handles to be filled
+ * in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the source list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagListPowerRails(
+ NvU32 * pListSize,
+ NvRmDiagPowerRailHandle * phRailList );
+
+/**
+ * Gets the name of the given power rail.
+ *
+ * @param hRail The target power rail handle.
+ *
+ * @return The 64-bit packed 8-character name of the given rail. Zero will be
+ * returned if diagnostic mode is not enabled or the rail is invalid.
+ */
+
+ NvU64 NvRmDiagPowerRailGetName(
+ NvRmDiagPowerRailHandle hRail );
+
+/**
+ * Lists power rails for the specified module.
+ *
+ * @param id Combined Id and instance for the target module.
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the client, on exit - actual number of power rail handles
+ * returned. If entry size is 0, maximum list size is returned.
+ * @param phRailList Pointer to the list of power rail handles to be filled
+ * in by this function. Ignored if input list size is 0.
+ *
+ * @retval NvSuccess if the power rail list is successfully returned.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagModuleListPowerRails(
+ NvRmDiagModuleID id,
+ NvU32 * pListSize,
+ NvRmDiagPowerRailHandle * phRailList );
+
+/**
+ * Configures power rail voltage.
+ *
+ * @param hRail The target power rail handle.
+ * @param VoltageMV The requested voltage level in millivolts.
+ *
+ * @retval NvSuccess if the power rail is successfully configured.
+ * @retval NvError_NotInitialized if diagnostic mode is not enabled.
+ */
+
+ NvError NvRmDiagConfigurePowerRail(
+ NvRmDiagPowerRailHandle hRail,
+ NvU32 VoltageMV );
+
+/**
+ * Verifies support for individual clock diagnostic lock (if supported
+ * clock frequency can be locked when diagnostic mode is disabled).
+ *
+ * @retval NV_TRUE if individual clock diagnostic lock is supported.
+ * @retval NV_FALSE if individual clock diagnostic lock is not supported.
+ */
+
+ NvBool NvRmDiagIsLockSupported(
+ void );
+
+/**
+ * Gets temperature in the specified thermal zone (used for
+ * thermal profiling, does not require diagnostic mode to be enabled)
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ZoneId The targeted thermal zone ID.
+ * @param pTemperatureC Output storage pointer for zone temperature
+ * (in degrees C).
+ *
+ * @retval NvSuccess if temperature is returned successfully.
+ * @retval NvError_Busy if attempt to access temperature monitoring
+ * device failed.
+ * @retval NvError_NotSupported if the specified zone is not monitored.
+ */
+
+ NvError NvRmDiagGetTemperature(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmTmonZoneId ZoneId,
+ NvS32 * pTemperatureC );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_dma.h b/arch/arm/mach-tegra/include/nvrm_dma.h
new file mode 100644
index 000000000000..2ff199ae03f5
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_dma.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_dma_H
+#define INCLUDED_nvrm_dma_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * DMA Resource manager </b>
+ *
+ * @b Description: Defines the interface to the NvRM DMA.
+ *
+ */
+
+/**
+ * @defgroup nvrm_dma Direct Memory Access (DMA) Controller API
+ *
+ * This is the DMA interface. These APIs provide data transfer from memory
+ * to the selected destination and vice versa. One end is memory and the
+ * other end is the module selected by the DMA module ID.
+ * This API allocates channels based on the priority request. A high priority
+ * channel cannot be shared by other DMA requestors. A low priority channel
+ * is shared between different requestors.
+ *
+ * @ingroup nvddk_rm
+ *
+ * @{
+ */
+
+#include "nvos.h"
+
+/**
+ * NvRmDmaHandle is an opaque context to the NvRmDmaRec interface
+ */
+
+typedef struct NvRmDmaRec *NvRmDmaHandle;
+
+/**
+ * @brief Defines the DMA capability structure for getting the capability of
+ * the data transfer and any limitation if the dma manager have.
+ */
+
+typedef struct NvRmDmaCapabilitiesRec
+{
+
+ /// Holds the granularity of the data length for dma transfer in bytes
+ NvU32 DmaGranularitySize;
+
+ /// Holds the information if there is any address alignment limitation
+ /// is available in term of bytes. if this value is 1 then there is no
+ /// limitation, any dma can transfer the data from any address. If this
+ /// value is 2 then the address should be 2 byte aligned always to do
+ /// the dma transfer. If this value is 4
+ /// then the address should be 4 byte aligned always to do the dma
+ /// transfer.
+ NvU32 DmaAddressAlignmentSize;
+} NvRmDmaCapabilities;
+
+/**
+ * @brief Defines the DMA client buffer information which is transferred
+ * recently. The direction of data transfer decides based on this address. The
+ * source address and destination address should be in line with the source
+ * module Id and destination module Id.
+ */
+
+typedef struct NvRmDmaClientBufferRec
+{
+
+ /// Specifies the dma source buffer physical address for dma transfer.
+ NvRmPhysAddr SourceBufferPhyAddress;
+
+ /// Specifies the dma destination buffer physical address for dma transfer.
+ NvRmPhysAddr DestinationBufferPhyAddress;
+
+ /// Source address wrap size in bytes. It tells that after how much bytes,
+ /// it will be wrapped.
+ /// If it is zero then wrapping for source address is disabled.
+ NvU32 SourceAddressWrapSize;
+
+ /// Destination address wrap size in bytes. It tells that after how much
+ /// bytes, it will be wrapped. If it is zero then wrapping for destination
+ /// address is disabled.
+ NvU32 DestinationAddressWrapSize;
+
+ /// Specifies the size of the buffer in bytes which is requested for
+ /// transfer.
+ NvU32 TransferSize;
+} NvRmDmaClientBuffer;
+
+/**
+ * @brief Specify the name of modules which can be supported by nvrm dma
+ * drivers. These dma modules can be either source or destination based on
+ * direction.
+ */
+
+typedef enum
+{
+
+ /// Specifies the dma module Id as Invalid
+ NvRmDmaModuleID_Invalid = 0x0,
+
+ /// Specifies the dma module Id for memory
+ NvRmDmaModuleID_Memory,
+
+ /// Specifies the dma module Id for I2s controller.
+ NvRmDmaModuleID_I2s,
+
+ /// Specifies the dma module Id for Ac97 controller.
+ NvRmDmaModuleID_Ac97,
+
+ /// Specifies the dma module Id for Spdif controller.
+ NvRmDmaModuleID_Spdif,
+
+ /// Specifies the dma module Id for uart controller.
+ NvRmDmaModuleID_Uart,
+
+ /// Specifies the dma module Id for Vfir controller.
+ NvRmDmaModuleID_Vfir,
+
+ /// Specifies the dma module Id for Mipi controller.
+ NvRmDmaModuleID_Mipi,
+
+ /// Specifies the dma module Id for spi controller.
+ NvRmDmaModuleID_Spi,
+
+ /// Specifies the dma module Id for slink controller.
+ NvRmDmaModuleID_Slink,
+
+ /// Specifies the dma module Id for I2c controller.
+ NvRmDmaModuleID_I2c,
+
+ /// Specifies the dma module Id for Dvc I2c controller.
+ NvRmDmaModuleID_Dvc,
+
+ /// Specifies the maximum number of modules supported.
+ NvRmDmaModuleID_Max,
+ NvRmDmaModuleID_Num,
+ NvRmDmaModuleID_Force32 = 0x7FFFFFFF
+} NvRmDmaModuleID;
+
+/**
+ * @brief Specify the direction of the transfer, either outbound data
+ * (source -> dest) or inbound data (source <- dest)
+ */
+
+typedef enum
+{
+
+ /// Specifies the direction of the transfer to be srcdevice -> dstdevice
+ NvRmDmaDirection_Forward = 0x1,
+
+ /// Specifies the direction of the transfer to be dstdevice -> srcdevice
+ NvRmDmaDirection_Reverse,
+ NvRmDmaDirection_Num,
+ NvRmDmaDirection_Force32 = 0x7FFFFFFF
+} NvRmDmaDirection;
+
+/**
+ * @brief Specify the priority of the dma either low priority or high priority.
+ */
+
+typedef enum
+{
+
+ /// Low priority DMA, no guarantee of latency to start transactions
+ NvRmDmaPriority_Low = 0x1,
+
+ /// High priority DMA guarantees the first buffer you send the
+ /// NvRmDmaStartDmaTransfer() will begin immediately.
+ NvRmDmaPriority_High,
+ NvRmDmaPriority_Num,
+ NvRmDmaPriority_Force32 = 0x7FFFFFFF
+} NvRmDmaPriority;
+
+/**
+ * @brief Get the capabilities of the dma channels.
+ *
+ * @param hDevice Handle to RM device.
+ * @param pRmDmaCaps Pointer to the capability structure where the capability
+ * values will be stored.
+ *
+ * @retval NvSuccess Indicates the function completed successfully.
+ */
+
+ NvError NvRmDmaGetCapabilities(
+ NvRmDeviceHandle hDevice,
+ NvRmDmaCapabilities * pRmDmaCaps );
+
+/**
+ * @brief Allocate the DMA channel for the data transfer. The dma is allocated
+ * based on the dma device Id information. Most of the configuration is also
+ * done based on the source/destination device Id during the channel
+ * allocation. It initializes the channel also with standard configuration
+ * based on source/ destination device. The data is transferred from memory to
+ * the dma requestor device or vice versa. The dma requestors device can be
+ * memory or any peripheral device listed in the NvRmDmaModuleID enumeration.
+ *
+ * Assert encountered in debug mode if passed parameter is invalid.
+ *
+ * @param hRmDevice Handle to RM device.
+ * @param phDma Pointer to the dma handle where the allocated dma handle
+ * will be stored.
+ * @param Enable32bitSwap if set to NV_TRUE will unconditionally reverse the
+ * memory order of bytes on 4-byte chunks. D3:D2:D1:D0 becomes D0:D1:D2:D3
+ * @param Priority Selects either Hi or Low priority. A Low priority
+ * allocation will only fail if the system is out of memory, and transfers on a
+ * Low priority channel will be intermixed with other clients of that channel.
+ * Hi priority allocations may fail if there is not a dedicated channel
+ * available for the Hi priority client. Hi priority channels should only be
+ * used if you have very specific latency requirements.
+ * @param DmaRequestorModuleId Specifies a source module Id.
+ * @param DmaRequestorInstanceId Specifies the instance of the source module.
+ *
+ * @retval NvSuccess Indicates the function completed successfully.
+ * @retval NvDMAChannelNotAvailable Indicates that there is no channel
+ * available for allocation.
+ * @retval NvError_InsufficientMemory Indicates that it will not able to
+ * allocate the memory for dma handles.
+ * @retval NvDMAInvalidSourceId Indicates that device requested is not the
+ * valid device.
+ * @retval NvError_MemoryMapFailed Indicates that the memory mapping for
+ * controller register failed.
+ * @retval NvError_MutexCreateFailed Indicates that the creation of mutex
+ * failed. Mutex is required to provide the thread safety.
+ * @retval NvError_SemaphoreCreateFailed Indicates that the creation of
+ * semaphore failed. Semaphore is required to provide the synchronization and
+ * also used in synchronous operation.
+ *
+ */
+
+ NvError NvRmDmaAllocate(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDmaHandle * phDma,
+ NvBool Enable32bitSwap,
+ NvRmDmaPriority Priority,
+ NvRmDmaModuleID DmaRequestorModuleId,
+ NvU32 DmaRequestorInstanceId );
+
+/**
+ * Frees the channel so that it can be reused by other clients. This function
+ * will block until all currently enqueued transfers complete.
+ *
+ * @note: We may change the functionality so that Free() returns immediately
+ * but internally the channel remains in an alloc'd state until all transfers
+ * complete.
+ *
+ * @param hDma A DMA handle from NvRmDmaAllocate. If hDma is NULL, this API has
+ * no effect.
+ */
+
+ void NvRmDmaFree(
+ NvRmDmaHandle hDma );
+
+/**
+ * @brief Starts the DMA channel for data transfer.
+ *
+ * Assert encountered in debug mode if passed parameter is invalid.
+ *
+ * @param hDma Specifies a DMA handle which is allocated by the Rm dma from
+ * NvRmDmaAllocate.
+ * @param pClientBuffer Specifies a pointer to the client information which
+ * contains the start buffer, destination buffer, and number of bytes
+ * transferred.
+ * @param DmaDirection Specifies whether the transfer is Forward src->dst or
+ * Reverse dst->src direction.
+ * @param WaitTimeoutInMilliSecond The time to wait in milliseconds. If it
+ * is zero then the call returns immediately (asynchronous operation). If it
+ * is non-zero then the call waits up to the requested timeout. If it is
+ * NV_WAIT_INFINITE then the call waits indefinitely until the transaction
+ * completes.
+ * @param AsynchSemaphoreId The semaphore Id which needs to be signalled if the
+ * client requested asynchronous operation. Pass NULL if no semaphore should
+ * be signalled when the transfer is complete.
+ *
+ * @retval NvSuccess Indicates the function completed successfully.
+ * @retval NvError_InvalidAddress Indicates that the address for source or
+ * destination is invalid.
+ * @retval NvError_InvalidSize Indicates that the bytes requested is invalid.
+ * @retval NvError_Timeout Indicates that transfer is not completed in a
+ * expected time and timeout happen.
+ */
+
+ NvError NvRmDmaStartDmaTransfer(
+ NvRmDmaHandle hDma,
+ NvRmDmaClientBuffer * pClientBuffer,
+ NvRmDmaDirection DmaDirection,
+ NvU32 WaitTimeoutInMilliSecond,
+ NvOsSemaphoreHandle AsynchSemaphoreId );
+
+/**
+ * @brief Aborts the currently running transfer as well as any other transfers
+ * that are queued up behind the currently running transfer.
+ *
+ * @param hDma Specifies a DMA handle which is allocated by the Rm dma from
+ * NvRmDmaAllocate.
+ */
+
+ void NvRmDmaAbort(
+ NvRmDmaHandle hDma );
+
+/**
+ * @brief Get the number of bytes transferred by the DMA in the current
+ * transaction.
+ *
+ * This tells the number of bytes that have been transferred by the DMA
+ * since the last transfer completed.
+ *
+ * @param hDma Specifies a DMA handle which is allocated by the Rm dma from
+ * NvRmDmaAllocate.
+ * @param pTransferCount Pointer to the variable where number of bytes transferred
+ * by dma will be stored.
+ * @param IsTransferStop Tells whether the current transfer is stopped or not.
+ *
+ * @retval NvSuccess Indicates the function completed successfully.
+ * @retval NvError_InvalidState The transfer is not going on.
+ */
+
+ NvError NvRmDmaGetTransferredCount(
+ NvRmDmaHandle hDma,
+ NvU32 * pTransferCount,
+ NvBool IsTransferStop );
+
+/**
+ * @brief Tells whether the given DMA transfer has completed or not.
+ *
+ * This tells whether the first or the second half of the buffer transfer has
+ * completed, for requestors that use a double-buffering mechanism like I2S.
+ *
+ * @param hDma Specifies a DMA handle which is allocated by the Rm dma from
+ * NvRmDmaAllocate.
+ * @param IsFirstHalfBuffer Tells whether the first half or second half of the dma transfer.
+ *
+ * @retval NV_TRUE Indicates that the transfer has been completed.
+ * @retval NV_FALSE Indicates that the transfer is still in progress.
+ */
+
+ NvBool NvRmDmaIsDmaTransferCompletes(
+ NvRmDmaHandle hDma,
+ NvBool IsFirstHalfBuffer );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_drf.h b/arch/arm/mach-tegra/include/nvrm_drf.h
new file mode 100644
index 000000000000..cc5cbe0cb4a3
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_drf.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_DRF_H
+#define INCLUDED_NVRM_DRF_H
+
+/**
+ * @defgroup nvrm_drf RM DRF Macros
+ *
+ * @ingroup nvddk_rm
+ *
+ * The following suite of macros are used for generating values to write into
+ * hardware registers, or for extracting fields from read registers. The
+ * hardware headers have a RANGE define for each field in the register in the
+ * form of x:y, 'x' being the high bit, 'y' the lower. Through a clever use
+ * of the C ternary operator, x:y may be passed into the macros below to
+ * generate masks, shift values, etc.
+ *
+ * There are two basic flavors of DRF macros, the first is used to define
+ * a new register value from 0, the other is modifying a field given a
+ * register value. An example of the first:
+ *
+ * reg = NV_DRF_DEF( HW, REGISTER0, FIELD0, VALUE0 )
+ * | NV_DRF_DEF( HW, REGISTER0, FIELD3, VALUE2 );
+ *
+ * To modify 'reg' from the previous example:
+ *
+ * reg = NV_FLD_SET_DRF_DEF( HW, REGISTER0, FIELD2, VALUE1, reg );
+ *
+ * To pass in numeric values instead of defined values from the header:
+ *
+ * reg = NV_DRF_NUM( HW, REGISTER3, FIELD2, 1024 );
+ *
+ * To read a value from a register:
+ *
+ * val = NV_DRF_VAL( HW, REGISTER3, FIELD2, reg );
+ *
+ * Some registers have non-zero reset values which may be extracted from the
+ * hardware headers via NV_RESETVAL.
+ */
+
+/*
+ * The NV_FIELD_* macros are helper macros for the public NV_DRF_* macros.
+ */
+#define NV_FIELD_LOWBIT(x) (0?x)
+#define NV_FIELD_HIGHBIT(x) (1?x)
+#define NV_FIELD_SIZE(x) (NV_FIELD_HIGHBIT(x)-NV_FIELD_LOWBIT(x)+1)
+#define NV_FIELD_SHIFT(x) ((0?x)%32)
+#define NV_FIELD_MASK(x) (0xFFFFFFFFUL>>(31-((1?x)%32)+((0?x)%32)))
+#define NV_FIELD_BITS(val, x) (((val) & NV_FIELD_MASK(x))<<NV_FIELD_SHIFT(x))
+#define NV_FIELD_SHIFTMASK(x) (NV_FIELD_MASK(x)<< (NV_FIELD_SHIFT(x)))
+
+/** NV_DRF_DEF - define a new register value.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param c defined value for the field
+ */
+#define NV_DRF_DEF(d,r,f,c) \
+ ((d##_##r##_0_##f##_##c) << NV_FIELD_SHIFT(d##_##r##_0_##f##_RANGE))
+
+/** NV_DRF_NUM - define a new register value.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param n numeric value for the field
+ */
+#define NV_DRF_NUM(d,r,f,n) \
+ (((n)& NV_FIELD_MASK(d##_##r##_0_##f##_RANGE)) << \
+ NV_FIELD_SHIFT(d##_##r##_0_##f##_RANGE))
+
+/** NV_DRF_VAL - read a field from a register.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param v register value
+ */
+#define NV_DRF_VAL(d,r,f,v) \
+ (((v)>> NV_FIELD_SHIFT(d##_##r##_0_##f##_RANGE)) & \
+ NV_FIELD_MASK(d##_##r##_0_##f##_RANGE))
+
+/** NV_FLD_SET_DRF_NUM - modify a register field.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param n numeric field value
+ @param v register value
+ */
+#define NV_FLD_SET_DRF_NUM(d,r,f,n,v) \
+ ((v & ~NV_FIELD_SHIFTMASK(d##_##r##_0_##f##_RANGE)) | NV_DRF_NUM(d,r,f,n))
+
+/** NV_FLD_SET_DRF_DEF - modify a register field.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param c defined field value
+ @param v register value
+ */
+#define NV_FLD_SET_DRF_DEF(d,r,f,c,v) \
+ (((v) & ~NV_FIELD_SHIFTMASK(d##_##r##_0_##f##_RANGE)) | \
+ NV_DRF_DEF(d,r,f,c))
+
+/** NV_RESETVAL - get the reset value for a register.
+
+ @ingroup nvrm_drf
+
+ @param d register domain (hardware block)
+ @param r register name
+ */
+#define NV_RESETVAL(d,r) (d##_##r##_0_RESET_VAL)
+
+#endif // INCLUDED_NVRM_DRF_H
diff --git a/arch/arm/mach-tegra/include/nvrm_gpio.h b/arch/arm/mach-tegra/include/nvrm_gpio.h
new file mode 100644
index 000000000000..62a711638621
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_gpio.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_gpio_H
+#define INCLUDED_nvrm_gpio_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit NvRm gpio APIs</b>
+ *
+ * @b Description: Declares Interface for NvRm gpio module.
+ */
+
+ /**
+ * @defgroup nvrm_gpio RM GPIO Services
+ *
+ * This is the Resource Manager interface to general-purpose input-output
+ * (GPIO) services. The fundamental abstraction of this API is a "pin handle",
+ * which is of type NvRmGpioPinHandle. A pin handle is acquired by calling the
+ * NvRmGpioAcquirePinHandle API. This API returns a pin handle which is
+ * subsequently used by the rest of the GPIO APIs.
+ *
+ * @ingroup nvddk_rm
+ * @{
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+
+/**
+ * NvRmGpioHandle is an opaque handle to the GPIO device on the chip.
+ */
+
+typedef struct NvRmGpioRec *NvRmGpioHandle;
+
+/**
+ * @brief GPIO pin handle which describes the physical pin. These values should
+ * not be cached or hardcoded by the drivers. This can vary from chip to chip
+ * and board to board.
+ */
+
+typedef NvU32 NvRmGpioPinHandle;
+
+/**
+ * @brief Defines the possible gpio pin modes.
+ */
+
+typedef enum
+{
+
+ /**
+ * Specifies the gpio pin as not in use. When in this state, the RM or
+ * ODM Kit may park the pin in a board-specific state in order to
+ * minimize leakage current.
+ */
+ NvRmGpioPinMode_Inactive = 1,
+
+ /// Specifies the gpio pin mode as input and enable interrupt for level low.
+ NvRmGpioPinMode_InputInterruptLow,
+
+ /// Specifies the gpio pin mode as input and enable interrupt for level high.
+ NvRmGpioPinMode_InputInterruptHigh,
+
+ /// Specifies the gpio pin mode as input and no interrupt configured.
+ NvRmGpioPinMode_InputData,
+
+ /// Specifies the gpio pin mode as output.
+ NvRmGpioPinMode_Output,
+
+ /// Specifies the gpio pin mode as a special function.
+ NvRmGpioPinMode_Function,
+
+ /// Specifies the gpio pin as input and interrupt configured to any edge.
+ /// i.e. the semaphore will be signaled for both the rising and falling edges.
+ NvRmGpioPinMode_InputInterruptAny,
+
+ /// Specifies the gpio pin as input and interrupt configured to rising edge.
+ NvRmGpioPinMode_InputInterruptRisingEdge,
+
+ /// Specifies the gpio pin as input and interrupt configured to falling edge.
+ NvRmGpioPinMode_InputInterruptFallingEdge,
+ NvRmGpioPinMode_Num,
+ NvRmGpioPinMode_Force32 = 0x7FFFFFFF
+} NvRmGpioPinMode;
+
+/**
+ * @brief Defines the pin state
+ */
+
+typedef enum
+{
+
+ // Pin is low
+ NvRmGpioPinState_Low = 0,
+
+ // Pin is high
+ NvRmGpioPinState_High,
+
+ // Pin is in tri state
+ NvRmGpioPinState_TriState,
+ NvRmGpioPinState_Num,
+ NvRmGpioPinState_Force32 = 0x7FFFFFFF
+} NvRmGpioPinState;
+
+// Generates (constructs) a pin handle until the NvRmGpioAcquirePinHandle
+// API is implemented.
+#define GPIO_MAKE_PIN_HANDLE(inst, port, pin) (0x80000000 | (((NvU32)(pin) & 0xFF)) | (((NvU32)(port) & 0xff) << 8) | (((NvU32)(inst) & 0xff )<< 16))
+#define NVRM_GPIO_CAMERA_PORT (0xfe)
+#define NVRM_GPIO_CAMERA_INST (0xfe)
+
+/**
+ * Creates and opens a GPIO handle. The handle can then be used to
+ * access GPIO functions.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param phGpio Specifies a pointer to the gpio handle where the
+ * allocated handle is stored. The memory for handle is allocated
+ * inside this API.
+ *
+ * @retval NvSuccess gpio initialization is successful.
+ */
+
+ NvError NvRmGpioOpen(
+ NvRmDeviceHandle hRmDevice,
+ NvRmGpioHandle * phGpio );
+
+/**
+ * Closes the GPIO handle. Any pin settings made while this handle was
+ * open will remain. All events enabled by this handle will be
+ * disabled.
+ *
+ * @param hGpio A handle from NvRmGpioOpen(). If hGpio is NULL, this API does
+ * nothing.
+ */
+
+ void NvRmGpioClose(
+ NvRmGpioHandle hGpio );
+
+/** Get NvRmGpioPinHandle from the physical port and pin number. If a driver
+ * acquires a pin handle another driver will not be able to use this until the
+ * pin is released.
+ *
+ * @param hGpio A handle from NvRmGpioOpen().
+ * @param port Physical gpio ports which are chip specific.
+ * @param pinNumber pin number in that port.
+ * @param phGpioPin Pointer to the GPIO pin handle.
+ */
+
+ NvError NvRmGpioAcquirePinHandle(
+ NvRmGpioHandle hGpio,
+ NvU32 port,
+ NvU32 pin,
+ NvRmGpioPinHandle * phPin );
+
+/** Releases the pin handles acquired by NvRmGpioAcquirePinHandle API.
+ *
+ * @param hGpio A handle got from NvRmGpioOpen().
+ * @param hPin Array of pin handles got from NvRmGpioAcquirePinHandle().
+ * @param pinCount Size of pin handles array.
+ */
+
+ void NvRmGpioReleasePinHandles(
+ NvRmGpioHandle hGpio,
+ NvRmGpioPinHandle * hPin,
+ NvU32 pinCount );
+
+/**
+ * Sets the state of array of pins.
+ *
+ * NOTE: When multiple pins are specified (pinCount is greater than
+ * one), ODMs should not make assumptions about the order in which
+ * pins are updated. The implementation will attempt to coalesce
+ * updates to occur atomically; however, this can not be guaranteed in
+ * all cases, and may not occur if the list of pins includes pins from
+ * multiple ports.
+ *
+ * @param hGpio Specifies the gpio handle.
+ * @param pin Array of pin handles.
+ * @param pinState Array of elements specifying the pin state (of type
+ * NvRmGpioPinState).
+ * @param pinCount Number of elements in the array.
+ */
+
+ void NvRmGpioWritePins(
+ NvRmGpioHandle hGpio,
+ NvRmGpioPinHandle * pin,
+ NvRmGpioPinState * pinState,
+ NvU32 pinCount );
+
+/**
+ * Reads the state of array of pins.
+ *
+ * @param hGpio The gpio handle.
+ * @param pin Array of pin handles.
+ * @param pinState Array of elements specifying the pin state (of type
+ * NvRmGpioPinState).
+ * @param pinCount Number of elements in the array.
+ */
+
+ void NvRmGpioReadPins(
+ NvRmGpioHandle hGpio,
+ NvRmGpioPinHandle * pin,
+ NvRmGpioPinState * pPinState,
+ NvU32 pinCount );
+
+/**
+ * Configures a set of GPIO pins to a specified mode. Don't use this API for
+ * the interrupt modes. For interrupt modes, use NvRmGpioInterruptRegister and
+ * NvRmGpioInterruptUnregister APIs.
+ *
+ * @param hGpio The gpio handle.
+ * @param pin Pin handle array returned by a calls to NvRmGpioAcquirePinHandle()
+ * @param pinCount Number elements in the pin handle array.
+ *
+ * @param Mode Pin mode of type NvRmGpioPinMode.
+ *
+ *
+ * @retval NvSuccess requested operation is successful.
+ */
+
+ NvError NvRmGpioConfigPins(
+ NvRmGpioHandle hGpio,
+ NvRmGpioPinHandle * pin,
+ NvU32 pinCount,
+ NvRmGpioPinMode Mode );
+
+/*
+ * Get the IRQs associated with the pin handles. So that the client can
+ * register the interrupt callback for that using interrupt APIs
+ */
+
+ NvError NvRmGpioGetIrqs(
+ NvRmDeviceHandle hRmDevice,
+ NvRmGpioPinHandle * pin,
+ NvU32 * Irq,
+ NvU32 pinCount );
+
+/**
+ * Opaque handle to the GPIO interrupt.
+ */
+
+typedef struct NvRmGpioInterruptRec *NvRmGpioInterruptHandle;
+
+
+/* NOTE: Use the 2 APIs below to configure the gpios to interrupt mode and to
+ * have callback functions. For a test case showing how to use these APIs,
+ * refer to the nvrm_gpio_unit_test application.
+ *
+ * Since the ISR is written by the clients of the API, care should be taken to
+ * clear the interrupt before the ISR is returned. If one fails to do that,
+ * interrupt will be triggered soon after the ISR returns.
+ */
+
+/**
+ * Registers an interrupt callback function and the mode of interrupt for the
+ * gpio pin specified.
+ *
+ * Callback will be using the interrupt thread and the interrupt stack on linux
+ * and IST on wince. So, care should be taken on what APIs can be used on the
+ * callback function. Not all the nvos functions are available in the interrupt
+ * context. Check the nvos.h header file for the list of the functions available.
+ * When the callback is called, the interrupt on the pin is disabled. As soon as
+ * the callback exits, the interrupt is re-enabled. So, external interrupts
+ * should be cleared before the callback returns.
+ *
+ * @param hGpio The gpio handle.
+ * @param hRm The RM device handle.
+ * @param hPin The handle to a GPIO pin.
+ * @param Callback Callback function which will be called when the interrupt
+ * triggers.
+ * @param Mode Interrupt mode. See @NvRmGpioPinMode
+ * @param CallbackArg Argument used when the callback is called by the ISR.
+ * @param hGpioInterrupt Interrupt handle for this registered interrupt. This
+ * handle should be used while calling NvRmGpioInterruptUnregister for
+ * unregistering the interrupt.
+ * @param DebounceTime The debounce time in milliseconds
+ * @retval NvSuccess requested operation is successful.
+ */
+NvError
+NvRmGpioInterruptRegister(
+ NvRmGpioHandle hGpio,
+ NvRmDeviceHandle hRm,
+ NvRmGpioPinHandle hPin,
+ NvOsInterruptHandler Callback,
+ NvRmGpioPinMode Mode,
+ void *CallbackArg,
+ NvRmGpioInterruptHandle *hGpioInterrupt,
+ NvU32 DebounceTime);
+
+/**
+ * Unregister the GPIO interrupt handler.
+ *
+ * @param hGpio The gpio handle.
+ * @param hRm The RM device handle.
+ * @param handle The interrupt handle returned by a successful call to the
+ * NvRmGpioInterruptRegister API.
+ *
+ */
+void
+NvRmGpioInterruptUnregister(
+ NvRmGpioHandle hGpio,
+ NvRmDeviceHandle hRm,
+ NvRmGpioInterruptHandle handle);
+
+/**
+ * Enable the GPIO interrupt handler.
+ *
+ * @param handle The interrupt handle returned by a successful call to the
+ * NvRmGpioInterruptRegister API.
+ *
+ * @retval "NvError_BadParameter" if handle is not valid
+ * @retval "NvError_InsufficientMemory" if interrupt enable failed.
+ * @retval "NvSuccess" if registration is successful.
+*/
+NvError
+NvRmGpioInterruptEnable(NvRmGpioInterruptHandle handle);
+
+/*
+ * Callback used to re-enable the interrupts.
+ *
+ * @param handle The interrupt handle returned by a successful call to the
+ * NvRmGpioInterruptRegister API.
+ */
+void
+NvRmGpioInterruptDone( NvRmGpioInterruptHandle handle );
+
+
+
+/**
+ * Mask/Unmask a gpio interrupt.
+ *
+ * Drivers can use this API to fend off interrupts. Mask means interrupts are
+ * not forwarded to the CPU. Unmask means, interrupts are forwarded to the CPU.
+ * In case of SMP systems, this API masks the interrupts to all the CPUs, not
+ * just the calling CPU.
+ *
+ *
+ * @param handle Interrupt handle returned by NvRmGpioInterruptRegister API.
+ * @param mask NV_FALSE to forward the interrupt to the CPU. NV_TRUE to
+ * mask the interrupts to the CPU.
+ */
+void
+NvRmGpioInterruptMask(NvRmGpioInterruptHandle hGpioInterrupt, NvBool mask);
+
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_hardware_access.h b/arch/arm/mach-tegra/include/nvrm_hardware_access.h
new file mode 100644
index 000000000000..497fd94e57a1
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_hardware_access.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_HARDWARE_ACCESS_H
+#define INCLUDED_NVRM_HARDWARE_ACCESS_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+#include "nvos.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+#if !defined(NV_OAL)
+#define NV_OAL 0
+#endif
+
+// By default, sim is supported on WinXP/x86 and Linux/x86 builds only.
+#if !defined(NV_DEF_ENVIRONMENT_SUPPORTS_SIM)
+#if NVCPU_IS_X86 && ((NVOS_IS_WINDOWS && !NVOS_IS_WINDOWS_CE) || NVOS_IS_LINUX) && !NV_OAL
+#define NV_DEF_ENVIRONMENT_SUPPORTS_SIM 1
+#else
+#define NV_DEF_ENVIRONMENT_SUPPORTS_SIM 0
+#endif
+#endif
+
+/**
+ * NV_WRITE* and NV_READ* - low level read/write api to hardware.
+ *
+ * These macros should be used to read and write registers and memory
+ * in NvDDKs so that the DDK will work on simulation and real hardware
+ * with no changes.
+ *
+ * This is for hardware modules that are NOT behind the host. Modules that
+ * are behind the host should use nvrm_channel.h.
+ *
+ * A DDK can obtain a mapping to its registers by using the
+ * NvRmPhysicalMemMap() function. This mapping is always uncached. The
+ * resulting pointer can then be used with NV_READ and NV_WRITE.
+ */
+
+/*
+ * Maps the given physical address to the user's virtual address space.
+ *
+ * @param phys The physical address to map into the virtual address space
+ * @param size The size of the mapping
+ * @param flags Any flags for the mapping -- exactly matches NVOS_MAP_*
+ * @param memType The memory mapping to use (uncached, write-combined, etc.)
+ * @param ptr Output -- the resulting virtual pointer
+ */
+// FIXME: NvOs needs to take this up, however I think this is more
+// complex than just mapping. E.G. does it map into the kernel vaddr, or
+// the current process vaddr? And how does this work on windows and
+// windows-ce?
+NvError NvRmPhysicalMemMap(NvRmPhysAddr phys, size_t size, NvU32 flags,
+ NvOsMemAttribute memType, void **ptr );
+
+/*
+ * Unmaps the given virtual address from NvRmPhysicalMemMap.
+ */
+void NvRmPhysicalMemUnmap(void *ptr, size_t size);
+
+/**
+ * NV_WRITE[8|16|32|64] - Writes N data bits to hardware.
+ *
+ * @param a The address to write.
+ * @param d The data to write.
+ */
+
+/**
+ * NV_READ[8|16|32|64] - Reads N bits from hardware.
+ *
+ * @param a The address to read.
+ */
+
+void NvWrite08(void *addr, NvU8 data);
+void NvWrite16(void *addr, NvU16 data);
+void NvWrite32(void *addr, NvU32 data);
+void NvWrite64(void *addr, NvU64 data);
+NvU8 NvRead08(void *addr);
+NvU16 NvRead16(void *addr);
+NvU32 NvRead32(void *addr);
+NvU64 NvRead64(void *addr);
+void NvWriteBlk(void *dst, const void *src, NvU32 length);
+void NvReadBlk(void *dst, const void *src, NvU32 length);
+
+#if NV_DEF_ENVIRONMENT_SUPPORTS_SIM == 1
+
+#define NV_WRITE08(a,d) NvWrite08((void *)(a),(d))
+#define NV_WRITE16(a,d) NvWrite16((void *)(a),(d))
+#define NV_WRITE32(a,d) NvWrite32((void *)(a),(d))
+#define NV_WRITE64(a,d) NvWrite64((void *)(a),(d))
+#define NV_READ8(a) NvRead08((void *)(a))
+#define NV_READ16(a) NvRead16((void *)(a))
+#define NV_READ32(a) NvRead32((void *)(a))
+#define NV_READ64(a) NvRead64((void *)(a))
+#define NV_WRITE(dst, src, len) NvWriteBlk(dst, src, len)
+#define NV_READ(dst, src, len) NvReadBlk(dst, src, len)
+
+#else
+/* connected to hardware */
+
+#define NV_WRITE08(a,d) *((volatile NvU8 *)(a)) = (d)
+#define NV_WRITE16(a,d) *((volatile NvU16 *)(a)) = (d)
+#define NV_WRITE32(a,d) *((volatile NvU32 *)(a)) = (d)
+#define NV_WRITE64(a,d) *((volatile NvU64 *)(a)) = (d)
+#define NV_READ8(a) *((const volatile NvU8 *)(a))
+#define NV_READ16(a) *((const volatile NvU16 *)(a))
+#define NV_READ32(a) *((const volatile NvU32 *)(a))
+#define NV_READ64(a) *((const volatile NvU64 *)(a))
+#define NV_WRITE(dst, src, len) NvOsMemcpy(dst, src, len)
+#define NV_READ(dst, src, len) NvOsMemcpy(dst, src, len)
+
+#endif // !hardware
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_HARDWARE_ACCESS_H
diff --git a/arch/arm/mach-tegra/include/nvrm_i2c.h b/arch/arm/mach-tegra/include/nvrm_i2c.h
new file mode 100644
index 000000000000..5cc245def79b
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_i2c.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_i2c_H
+#define INCLUDED_nvrm_i2c_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_pinmux.h"
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvos.h"
+#include "nvcommon.h"
+
+/**
+ * NvRmI2cHandle is an opaque handle to the NvRmI2cStructRec interface
+ */
+
+typedef struct NvRmI2cRec *NvRmI2cHandle;
+
+/**
+ * @brief Defines the I2C capability structure. It contains the
+ * capabilities/limitations (like maximum bytes transferred,
+ * supported clock speed) of the hardware.
+ */
+
+typedef struct NvRmI2cCapabilitiesRec
+{
+
+ /**
+ * Maximum number of packet length in bytes which can be transferred
+ * between start and the stop pulses.
+ */
+ NvU32 MaximumPacketLengthInBytes;
+
+ /// Maximum speed which I2C controller can support.
+ NvU32 MaximumClockSpeed;
+
+ /// Minimum speed which I2C controller can support.
+ NvU32 MinimumClockSpeed;
+} NvRmI2cCapabilities;
+
+/**
+ * @brief Initializes and opens the i2c channel. This function allocates the
+ * handle for the i2c channel and provides it to the client.
+ *
+ * Assert encountered in debug mode if passed parameter is invalid.
+ *
+ * @param hDevice Handle to the Rm device which is required by Rm to acquire
+ * the resources from RM.
+ * @param IoModule The IO module to set, it is either NvOdmIoModule_I2c
+ * or NvOdmIoModule_I2c_Pmu
+ * @param instance Instance of the i2c driver to be opened.
+ * @param phI2c Points to the location where the I2C handle shall be stored.
+ *
+ * @retval NvSuccess Indicates that the I2c channel has successfully opened.
+ * @retval NvError_InsufficientMemory Indicates that function fails to allocate
+ * the memory.
+ * @retval NvError_NotInitialized Indicates the I2c initialization failed.
+ */
+
+ NvError NvRmI2cOpen(
+ NvRmDeviceHandle hDevice,
+ NvU32 IoModule,
+ NvU32 instance,
+ NvRmI2cHandle * phI2c );
+
+/**
+ * @brief Closes the i2c channel. This function frees the memory allocated for
+ * the i2c handle for the i2c channel.
+ * This function de-initializes the i2c channel. This API never fails.
+ *
+ * @param hI2c A handle from NvRmI2cOpen(). If hI2c is NULL, this API does
+ * nothing.
+ */
+
+ void NvRmI2cClose(
+ NvRmI2cHandle hI2c );
+
+// Maximum number of bytes that can be sent between the i2c start and stop conditions
+#define NVRM_I2C_PACKETSIZE (8)
+
+// Maximum number of bytes that can be sent between the i2c start and repeat start condition.
+#define NVRM_I2C_PACKETSIZE_WITH_NOSTOP (4)
+
+/// Indicates a I2C read transaction.
+#define NVRM_I2C_READ (0x1)
+
+/// Indicates that it is a write transaction
+#define NVRM_I2C_WRITE (0x2)
+
+/// Indicates that there is no STOP following this transaction. This also implies
+/// that there is always one more transaction following a transaction with
+/// NVRM_I2C_NOSTOP attribute.
+#define NVRM_I2C_NOSTOP (0x4)
+
+// Some devices doesn't support ACK. By, setting this flag, master will not
+// expect the generation of ACK from the device.
+#define NVRM_I2C_NOACK (0x8)
+
+// Software I2C using GPIO. Doesn't use the hardware controllers. This path
+// should be used only for testing.
+#define NVRM_I2C_SOFTWARE_CONTROLLER (0x10)
+
+typedef struct NvRmI2cTransactionInfoRec
+{
+
+ /// Flags to indicate the transaction details, like write/read or read
+ /// without a stop or write without a stop.
+ NvU32 Flags;
+
+ /// Number of bytes to be transferred.
+ NvU32 NumBytes;
+
+ /// I2C slave device address
+ NvU32 Address;
+
+ /// Indicates that the address is a 10-bit address.
+ NvBool Is10BitAddress;
+} NvRmI2cTransactionInfo;
+
+/**
+ * @brief Does multiple I2C transactions. Each transaction can be a read or write.
+ *
+ * AP15 I2C controller has the following limitations:
+ * - Any read/write transaction is limited to NVRM_I2C_PACKETSIZE
+ * - All transactions will be terminated by STOP unless NVRM_I2C_NOSTOP flag
+ * is specified. Specifying NVRM_I2C_NOSTOP means, *next* transaction will start
+ * with a repeat start, with NO stop between transactions.
+ * - When NVRM_I2C_NOSTOP is specified for a transaction -
+ * 1. Next transaction will start with repeat start.
+ * 2. Next transaction is mandatory.
+ * 3. Next Next transaction cannot have NVRM_I2C_NOSTOP flag set. i.e no
+ * back to back repeat starts.
+ * 4. Current and next transactions are limited to size
+ * NVRM_I2C_PACKETSIZE_WITH_NOSTOP.
+ * 5. Finally, current transactions and next Transaction should be of same
+ * size.
+ *
+ * This imposes some limitations on how the hardware can be used. However, the
+ * API itself doesn't have any limitations. If the HW cannot be used, it falls
+ * back to GPIO based I2C. Gpio I2C bypasses Hw controller and bit bangs the
+ * SDA/SCL lines of I2C.
+ *
+ * @param hI2c Handle to the I2C channel.
+ * @param I2cPinMap for I2C controllers which are being multiplexed across
+ * multiple pin mux configurations, this specifies which pin mux configuration
+ * should be used for the transaction. Must be 0 when the ODM pin mux query
+ * specifies a non-multiplexed configuration for the controller.
+ * @param WaitTimeoutInMilliSeconds Timeout for the transaction.
+ * @param ClockSpeedKHz Clock speed in KHz.
+ * @param Data Continuous stream of data
+ * @param DataLen Length of the data stream
+ * @param Transaction Pointer to the NvRmI2cTransactionInfo structure
+ * @param NumOfTransactions Number of transactions
+ *
+ *
+ * @retval NvSuccess Indicates the operation succeeded.
+ * @retval NvError_NotSupported Indicates assumption on parameter values violated.
+ * @retval NvError_InvalidState Indicates that the last read call is not
+ * completed.
+ * @retval NvError_ControllerBusy Indicates controller is presently busy with an
+ * i2c transaction.
+ * @retval NvError_InvalidDeviceAddress Indicates that the slave device address
+ * is invalid
+ */
+
+ NvError NvRmI2cTransaction(
+ NvRmI2cHandle hI2c,
+ NvU32 I2cPinMap,
+ NvU32 WaitTimeoutInMilliSeconds,
+ NvU32 ClockSpeedKHz,
+ NvU8 * Data,
+ NvU32 DataLen,
+ NvRmI2cTransactionInfo * Transaction,
+ NvU32 NumOfTransactions );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_init.h b/arch/arm/mach-tegra/include/nvrm_init.h
new file mode 100644
index 000000000000..5aaa410d5b17
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_init.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_init_H
+#define INCLUDED_nvrm_init_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+
+#include "nvcommon.h"
+#include "nverror.h"
+
+/**
+ * NvRmDeviceHandle is an opaque handle to an RM device.
+ */
+
+typedef struct NvRmDeviceRec *NvRmDeviceHandle;
+
+/**
+ * A physical address type sized such that it matches the addressing support of
+ * the hardware modules RM typically interfaces with. May be smaller than an
+ * NvOsPhysAddr.
+ *
+ * XXX We should probably get rid of this and just use NvU32. It's rather
+ * difficult to explain what exactly NvRmPhysAddr is. Also, what if some units
+ * are upgraded to do 64-bit addressing and others remain 32? Would we really
+ * want to increase NvRmPhysAddr to NvU64 across the board?
+ *
+ * Another option would be to put the following types in nvcommon.h:
+ * typedef NvU32 NvPhysAddr32;
+ * typedef NvU64 NvPhysAddr64;
+ * Using these types would then be purely a form of documentation and nothing
+ * else.
+ *
+ * This header file is a somewhat odd place to put this type. Putting it in
+ * memmgr would be even worse, though, because then a lot of header files would
+ * all suddenly need to #include nvrm_memmgr.h just to get the NvRmPhysAddr
+ * type. (They already all include this header anyway.)
+ */
+
+typedef NvU32 NvRmPhysAddr;
+
+/**
+ * Opens the Resource Manager for a given device.
+ *
+ * Can be called multiple times for a given device. Subsequent
+ * calls will not necessarily return the same handle. Each call to
+ * NvRmOpen() must be paired with a corresponding call to NvRmClose().
+ *
+ * Assert encountered in debug mode if DeviceId value is invalid.
+ *
+ * This call is not intended to perform any significant hardware
+ * initialization of the device; rather its primary purpose is to
+ * initialize RM's internal data structures that are involved in
+ * managing the device.
+ *
+ * @param pHandle the RM handle is stored here.
+ * @param DeviceId implementation-dependent value specifying the device
+ * to be opened. Currently must be set to zero.
+ *
+ * @retval NvSuccess Indicates that RM was successfully opened.
+ * @retval NvError_InsufficientMemory Indicates that RM was unable to allocate
+ * memory for its internal data structures.
+ */
+
+ NvError NvRmOpen(
+ NvRmDeviceHandle * pHandle,
+ NvU32 DeviceId );
+
+/**
+ * Called by the platform/OS code to initialize the Rm. Usage and
+ * implementation of this API is platform specific.
+ *
+ * This API should not be called by the normal clients of the RM.
+ *
+ * This API is guaranteed to succeed on the supported platforms.
+ *
+ * @param pHandle the RM handle is stored here.
+ */
+
+ void NvRmInit(
+ NvRmDeviceHandle * pHandle );
+
+/**
+ * Temporary version of NvRmOpen lacking the DeviceId parameter
+ */
+
+ NvError NvRmOpenNew(
+ NvRmDeviceHandle * pHandle );
+
+/**
+ * Closes the Resource Manager for a given device.
+ *
+ * Each call to NvRmOpen() must be paired with a corresponding call
+ * to NvRmClose().
+ *
+ * @param hDevice The RM handle. If hDevice is NULL, this API has no effect.
+ */
+
+ void NvRmClose(
+ NvRmDeviceHandle hDevice );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_interrupt.h b/arch/arm/mach-tegra/include/nvrm_interrupt.h
new file mode 100644
index 000000000000..ad06f78b1c8b
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_interrupt.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_interrupt_H
+#define INCLUDED_nvrm_interrupt_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvos.h"
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Resource Manager %Interrupt API</b>
+ *
+ * @b Description: Declares the interrupt API for use by NvDDK modules.
+ */
+
+/**
+ * @defgroup nvrm_interrupt RM Interrupt Management Services
+ *
+ * @ingroup nvddk_rm
+ * @{
+ *
+ * IRQ Numbers
+ * -----------
+ * In most cases, we are using the CPU's legacy interrupt support, rather than
+ * the new MPCore interrupt controller. This means that we only have one ISR
+ * shared between all of the devices in our chip. To determine which device is
+ * interrupting us, we have to read some registers. We assign each interrupt
+ * source an "IRQ number". IRQ numbers are OS-independent and HW-dependent (a
+ * given device may have a different IRQ number from chip to chip).
+ *
+ * It is arbitrary how far we decode interrupts as part of determining the IRQ
+ * number. Normally we might assign one IRQ number to each interrupt line that
+ * feeds into the main interrupt controller (typically one per device in the
+ * chip), but we can decode further if we want. For example, there are several
+ * GPIO controllers, each of which controls 32 GPIO lines. The GPIO controller
+ * interrupt line is constructed by OR'ing together the interrupt lines for each
+ * of the 32 GPIO pins. If we want, we can assign each GPIO controller 32
+ * separate IRQ numbers, one per GPIO line; this simply means we have to sub-
+ * decode the interrupts a little further inside the ISR.
+ *
+ * The main advantage of doing this sub-decoding is that only a single driver is
+ * allowed to hook each interrupt source -- if multiple drivers both want to
+ * register interrupt handlers for the same interrupt source, the drivers will
+ * fight with one another trying to handle the same interrupt, so this is an
+ * error. At the same time, it's entirely plausible that out of a group of 32
+ * GPIO pins, multiple different drivers care about different groups of those
+ * pins. In the absence of sub-decoding, we would have to implement a "GPIO
+ * driver" whose sole purpose was to allow those other drivers to register for
+ * GPIO notifications, and then use driver-to-driver signaling to indicate when
+ * a pin has transitioned state. This is an extra level of overhead compared
+ * to if drivers are allowed to directly hook the interrupts for the pins they
+ * care about.
+ *
+ * Because IRQ numbers change from chip to chip, you must ask the RM for the IRQ
+ * number of the device when you want to hook its interrupt. This can be
+ * accomplished using the NvRmGetIrqForLogicalInterrupt() API. You pass it an
+ * [NvRmModuleID, Index] pair telling it what device you are interested in, and
+ * which sub-interrupt within that device. Often Index is just zero (many
+ * devices only have one IRQ number). For GPIO it might be the pin number
+ * within the GPIO controller. For UART, you might (entirely hypothetically --
+ * there is no requirement that you do this) have Index=0 for the receive
+ * interrupt and Index=1 for the send interrupt.
+ *
+ * Hooking an Interrupt
+ * --------------------
+ * Once you have the IRQ number(s), you can hook the interrupt(s) by calling
+ * NvRmInterruptRegister(). At driver shutdown, you can unhook the interrupt(s)
+ * by calling NvRmInterruptUnregister().
+ *
+ * NvRmInterruptRegister takes a list of IRQs and a list of callback functions to be
+ * called when the corresponding interrupt has fired. The callback functions
+ * will be passed an extra "void *context" parameter, typically a pointer to
+ * your private driver structure that keeps track of the state of your device.
+ * For example, the NAND driver might pass the NvDdkNandHandle as the context
+ * param.
+ *
+ * Drivers that care about more than one IRQ should call NvRmInterruptRegister only
+ * once. Calling NvRmInterruptRegister twice (each time with a single IRQ number)
+ * may consume more system resources than calling NvRmInterruptRegister once with
+ * a list of 2 IRQ numbers and 2 callbacks.
+ *
+ * Rules for Interrupt Handlers
+ * ----------------------------
+ * We assume that all interrupt handlers (i.e. the callbacks passed to
+ * NvRmInterruptRegister) are "fast": that is, any complex processing that cannot
+ * complete in a tightly bounded amount of time, such as polling registers to
+ * wait for the HW to complete some processing, is not done in the ISR proper.
+ * Instead, the ISR would signal a semaphore, clear the interrupt, and pass off
+ * the rest of the work to another thread.
+ *
+ * To be more precise about this, we expect all interrupt handlers to follow
+ * these rules:
+ * - They may only call a subset of NvOs functions. The exact subset is
+ * documented in nvos.h.
+ * - No floating-point. (We don't want to have to save and restore the
+ * floating point registers on an interrupt.)
+ * - They should use as little stack space as possible. They certainly should
+ * not use any recursive algorithms, for example. (For example, if they need
+ * to look up a node in a red-black tree, they must use an iterative version
+ * of the tree search rather than recursion.) Straw man: 256B maximum?
+ * - Any control flow structure that involves looping (like a "for" or "while"
+ * statement) must be guaranteed to terminate within a clearly understood
+ * time limit. We don't have a strict upper bound, but if it takes
+ * milliseconds, it's out of the question.
+ * - The callback function _must_ clear the cause of the interrupt. Upon
+ * returning from the callback the interrupt will be automatically re-enabled.
+ * If the cause is not cleared the system will be stuck in an infinite loop
+ * taking interrupts.
+ */
+
+/**
+ * A Logical Interrupt is a tuple that includes the class of interrupts
+ * (i.e., a module), an instance of that module, and the specific interrupt
+ * within that instance (an index). This is an abstraction for the
+ * actual interrupt bits implemented on the SOC.
+ */
+
+typedef struct NvRmLogicalIntrRec
+{
+
+ /**
+ * Interrupt index within the current instance of specified Module.
+ * This identifies a specific interrupt. This is an enumerated index
+ * and not a bit-mask.
+ */
+ NvU8 Index;
+
+ /**
+ * The SOC hardware controller class identifier
+ */
+ NvRmModuleID ModuleID;
+} NvRmLogicalIntr;
+
+/**
+ * Translate a given logical interrupt to its corresponding IRQ number.
+ *
+ * @param hRmDevice The RM device handle
+ * @param ModuleID The module of interest
+ * @param Index Zero-based interrupt index within the module
+ *
+ * @return The IRQ number.
+ */
+
+ NvU32 NvRmGetIrqForLogicalInterrupt(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleID,
+ NvU32 Index );
+
+/**
+ * Retrieve the number of IRQs associated with a particular module instance.
+ *
+ * @param hRmDevice The RM device handle
+ * @param ModuleID The module of interest
+ *
+ * @return The number of IRQs.
+ */
+
+ NvU32 NvRmGetIrqCountForLogicalInterrupt(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleID );
+
+/**
+ * Register the interrupt with the given interrupt handler.
+ *
+ * Assert encountered in debug mode if irq number is not valid.
+ *
+ * @see NvRmInterruptEnable()
+ *
+ * @param hRmDevice The RM device handle.
+ * @param IrqListSize Size of the IrqList passed in for registering the IRQ
+ * handlers for each IRQ number.
+ * @param pIrqList Array of IRQ numbers for which interrupt handlers are to be
+ * registered.
+ * @param pIrqHandlerList Array of interrupt routines to be called when an
+ * interrupt occurs.
+ * @param context Pointer to the registrant's context handle.
+ * @param handle Handle to the registered interrupts. This handle is used for
+ * unregistering the interrupt.
+ * @param InterruptEnable If true, immediately enable interrupt. Otherwise
+ * enable interrupt only after calling NvRmInterruptEnable().
+ *
+ * @retval "NvError_IrqRegistrationFailed" if the interrupt is already registered.
+ * @retval "NvSuccess" if registration is successful.
+ */
+NvError
+NvRmInterruptRegister(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 IrqListSize,
+ const NvU32 *pIrqList,
+ const NvOsInterruptHandler *pIrqHandlerList,
+ void *context,
+ NvOsInterruptHandle *handle,
+ NvBool InterruptEnable);
+
+/**
+ * Un-registers the interrupt handler from the associated interrupt handle which
+ * is returned by the NvRmInterruptRegister API.
+ *
+ * @param handle Handle returned when the interrupt is registered.
+ */
+void
+NvRmInterruptUnregister(
+ NvRmDeviceHandle hRmDevice,
+ NvOsInterruptHandle handle);
+
+/**
+ * Enable the interrupt handler from the associated interrupt handle which
+ * is returned by the NvRmInterruptRegister API.
+ *
+ * @param handle Handle returned when the interrupt is registered.
+ *
+ * @retval "NvError_BadParameter" if the handle is not valid.
+ * @retval "NvError_InsufficientMemory" if enabling the interrupt failed.
+ * @retval "NvSuccess" if the interrupt was enabled successfully.
+ */
+NvError
+NvRmInterruptEnable(
+ NvRmDeviceHandle hRmDevice,
+ NvOsInterruptHandle handle);
+
+/**
+ * Called by the interrupt callback to re-enable the interrupt.
+ */
+
+void
+NvRmInterruptDone( NvOsInterruptHandle handle );
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_keylist.h b/arch/arm/mach-tegra/include/nvrm_keylist.h
new file mode 100644
index 000000000000..350282791e8c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_keylist.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_keylist_H
+#define INCLUDED_nvrm_keylist_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Resource Manager Key-List APIs</b>
+ *
+ * @b Description: This API defines a simple means to set/get the state
+ * of ODM-Defined Keys.
+ */
+
+#include "nvos.h"
+#include "nvodm_keylist_reserved.h"
+
+/**
+ * Searches the List of Keys present and returns
+ * the Value of the appropriate Key.
+ *
+ * @param hRm Handle to the RM Device.
+ * @param KeyID ID of the key whose value is required.
+ *
+ * @return The value of the corresponding key. If the key is not
+ * present in the list, 0 is returned.
+ */
+
+
+
+ NvU32 NvRmGetKeyValue(
+ NvRmDeviceHandle hRm,
+ NvU32 KeyID );
+
+/**
+ * Searches the List of Keys Present and sets the value of the Key to the value
+ * given. If the Key is not present, it adds the key to the list and sets the
+ * value.
+ *
+ * @param hRm Handle to the RM Device.
+ * @param KeyID ID of the key whose value is to be set.
+ * @param Value Value to be set for the corresponding key.
+ *
+ * @retval NvSuccess Value has been successfully set.
+ * @retval NvError_InsufficientMemory Operation has failed while adding the
+ * key to the existing list.
+ */
+
+ NvError NvRmSetKeyValuePair(
+ NvRmDeviceHandle hRm,
+ NvU32 KeyID,
+ NvU32 Value );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_memctrl.h b/arch/arm/mach-tegra/include/nvrm_memctrl.h
new file mode 100644
index 000000000000..b760dd538c12
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_memctrl.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_memctrl_H
+#define INCLUDED_nvrm_memctrl_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+/*
+ * @ingroup nvrm_memctrl
+ * @{
+ */
+
+/**
+ * NvRmDeviceHandle is an opaque handle to an RM device.
+ */
+
+/**
+ * Start collecting statistics for specified clients. (2 normal clients and 1 llc client)
+ *
+ * @param rm The RM device handle.
+ * @param client_id_0 the ID of the first client
+ * @param client_id_1 the ID of the second client
+ * @param llc_client_id the ID of the llc client
+ *
+ */
+
+ void McStat_Start(
+ NvRmDeviceHandle rm,
+ NvU32 client_id_0,
+ NvU32 client_id_1,
+ NvU32 llc_client_id );
+
+/**
+ * Stops the counters collecting statistics for the specified clients.
+ * @param rm The RM device handle.
+ * @param client_0_cycles pointer to the number of cycles of client_0
+ * @param client_1_cycles pointer to the number of cycles of client_1
+ * @param llc_client_cycles pointer to the number of cycles of llc client
+ * @param llc_client_clocks pointer to the llc client's clock
+ * @param mc_clocks pointer to the memory controller's clock
+ */
+
+ void McStat_Stop(
+ NvRmDeviceHandle rm,
+ NvU32 * client_0_cycles,
+ NvU32 * client_1_cycles,
+ NvU32 * llc_client_cycles,
+ NvU32 * llc_client_clocks,
+ NvU32 * mc_clocks );
+
+/**
+ * Print out the collected memory control stat data
+ * @param client_id_0 the first client's ID
+ * @param client_0_cycles the number of cycles of client_0 from start to stop
+ * @param client_id_1 the second client's ID
+ * @param client_1_cycles the number of cycles of client_1 from start to stop
+ * @param llc_client_id the ID of llc client
+ * @param llc_client_clocks the clocks of llc client
+ * @param llc_client_cycles the number of cycles of llc client
+ * @param mc_clocks the memory controller's clock
+ */
+
+ void McStat_Report(
+ NvU32 client_id_0,
+ NvU32 client_0_cycles,
+ NvU32 client_id_1,
+ NvU32 client_1_cycles,
+ NvU32 llc_client_id,
+ NvU32 llc_client_clocks,
+ NvU32 llc_client_cycles,
+ NvU32 mc_clocks );
+
+/**
+ * Read the data of specified module and bit field
+ * @param modId the specified module ID
+ * @param start_index the start index of the required data
+ * @param length the length of the data
+ * @param value pointer to the variable that will store the data specified
+ *
+ * @retval NvSuccess Indicates that the data is read successfully.
+ */
+
+ NvError ReadObsData(
+ NvRmDeviceHandle rm,
+ NvRmModuleID modId,
+ NvU32 start_index,
+ NvU32 length,
+ NvU32 * value );
+
+/**
+ * Starts CPU performance monitors for the specified list of events
+ * (if monitors were already running they are restarted).
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pEventListSize Pointer to the event list size. On entry specifies
+ * list size allocated by the client, on exit - actual number of event monitors
+ * started. If entry size is 0, maximum number of monitored events is returned.
+ * @param pEventList Pointer to the list of events to be monitored. Ignored
+ * if input list size is 0. Monitors run status is not affected in this case.
+ *
+ * @note No event validation is performed. It is caller responsibility to pass
+ * valid event codes. See ARM control coprocessor CP15 specification for the
+ * list of event numbers and the respective event definitions.
+ *
+ * @retval NvSuccess if monitoring start function completed successfully.
+ * @retval NvError_NotSupported if core performance monitoring is not supported.
+ */
+
+ NvError NvRmCorePerfMonStart(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 * pEventListSize,
+ NvU32 * pEventList );
+
+/**
+ * Stops CPU performance monitors and returns event counts.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCountListSize Pointer to the count list size. On entry specifies
+ * list size allocated by the client, on exit - actual number of event counts
+ * returned.
+ * @param pCountList Pointer to the list filled in by this function with event
+ * counts since performance monitoring started. The order of returned counts
+ * is the same as the order of events specified by NvRmCorePerfMonStart()
+ * call. If input list size exceeds number of started event monitors the extra
+ * counts are meaningless. If input list size is 0, this parameter is ignored,
+ * and no event counts are returned.
+ * @param pTotalCycleCount Pointer to the total number of CPU clock cycles
+ * since performance monitoring started.
+ *
+ * @retval NvSuccess if monitoring results are retrieved successfully.
+ * @retval NvError_InvalidState if core performance monitoring has not been
+ * started or monitor overflow has occurred.
+ * @retval NvError_NotSupported if core performance monitoring is not supported.
+ */
+
+ NvError NvRmCorePerfMonStop(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 * pCountListSize,
+ NvU32 * pCountList,
+ NvU32 * pTotalCycleCount );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_memmgr.h b/arch/arm/mach-tegra/include/nvrm_memmgr.h
new file mode 100644
index 000000000000..cc431c8763f7
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_memmgr.h
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_memmgr_H
+#define INCLUDED_nvrm_memmgr_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+#include "nvos.h"
+
+/**
+ * FAQ for commonly asked questions:
+ *
+ * Q) Why can NvRmMemMap fail?
+ * A) Some operating systems don't allow user mode applications to map arbitrary
+ * memory regions, this is a huge security hole. In other environments, such
+ *    as simulation, it's just not even possible to get a direct pointer to
+ * the memory, because the simulation is in a different process.
+ *
+ * Q) What do I do if NvRmMemMap fails?
+ * A) Driver writers have two choices. If the driver must have a mapping, for
+ * example direct draw requires a pointer to the memory then the driver
+ * will have to fail whatever operation it is doing and return an error.
+ * The other choice is to fall back to using NvRmMemRead/Write functions
+ * or NvRmMemRdxx/NvRmMemWrxx functions, which are guaranteed to succeed.
+ *
+ * Q) Why should I use NvRmMemMap instead of NvOsPhysicalMemMap?
+ * A) NvRmMemMap will do a lot of extra work in an OS like WinCE to create
+ * a new mapping to the memory in your process space. NvOsPhysicalMemMap
+ *    is for mapping registers and other non-memory locations. Using
+ * this API on WindowsCE will cause WindowsCE to crash.
+ */
+
+
+
+/**
+ * UNRESOLVED ISSUES:
+ *
+ * 1. Should we have NvRmFill* APIs in addition to NvRmWrite*? Say, if you just
+ * want to clear a buffer to zero?
+ *
+ * 2. There is currently an issue with a memhandle that is shared across
+ * processes. If a MemHandle is created, and then duplicated into another
+ * process uesing NvRmMemHandleGetId/NvRmMemHandleFromId it's not clear
+ * what would happen if both processes tried to do an NvRmAlloc on a handle.
+ * Perhaps make NvRmMemHandleGetId fail if the memory is not already
+ * allocated.
+ *
+ * 3. It may be desirable to have more hMem query functions, for debuggability.
+ * Part of the information associated with a memory buffer will live in
+ *    kernel space, and not be accessible efficiently from a user process.
+ * Knowing which heap a buffer is in, or whether a buffer is pinned or
+ * mapped could be useful. Note that queries like this could involve race
+ * conditions. For example, memory could be moved from one heap to another
+ * the moment after you ask what heap it's in.
+ */
+
+/**
+ * @defgroup nvrm_memmgr RM Memory Management Services
+ *
+ * @ingroup nvddk_rm
+ *
+ * The APIs in this header file are intended to be used for allocating and
+ * managing memory that needs to be accessed by HW devices. It is not intended
+ * as a replacement for malloc() -- that functionality is provided by
+ * NvOsAlloc(). If only the CPU will ever access the memory, this API is
+ * probably extreme overkill for your needs.
+ *
+ * Memory allocated by NvRmMemAlloc() is intended to be asynchronously movable
+ * by the RM at any time. Although discouraged, it is possible to permanently
+ * lock down ("pin") a memory buffer such that it can never be moved. Normally,
+ * however, the intent is that you would only pin a buffer for short periods of
+ * time, on an as-needed basis.
+ *
+ * The first step to allocating memory is allocating a handle to refer to the
+ * allocation. The handle has a separate lifetime from the underlying buffer.
+ * Some properties of the memory, such as its size in bytes, must be declared at
+ * handle allocation time and can never be changed.
+ *
+ * After successfully allocating a handle, you can specify properties of the
+ * memory buffer that are allowed to change over time. (Currently no such
+ * properties exist, but in the past a "priority" attribute existed and may
+ * return some day in the future.)
+ *
+ * After specifying the properties of the memory buffer, it can be allocated.
+ * Some additional properties, such as the set of heaps that the memory is
+ * permitted to be allocated from, must be specified at allocation time and
+ * cannot be changed over the buffer's lifetime.
+ *
+ * The contents of memory can be examined and modified using a variety of read
+ * and write APIs, such as NvRmMemRead and NvRmMemWrite. However, in some
+ * cases, it is necessary for the driver or application to be able to directly
+ * read or write the buffer using a pointer. In this case, the NvRmMemMap API
+ * can be used to obtain such a mapping into the current process's virtual
+ * address space. It is important to note that the map operation is not
+ * guaranteed to succeed. Drivers that use mappings are strongly encouraged
+ * to support two code paths: one for when the mapping succeeds, and one for
+ * when the mapping fails. A memory buffer is allowed to be mapped multiple
+ * times, and the mappings are permitted to be of subregions of the buffer if
+ * desired.
+ *
+ * Before the memory buffer is used, it must be pinned. While pinned, the
+ * buffer will not be moved, and its physical address can be safely queried. A
+ * memory buffer can be pinned multiple times, and the pinning will be reference
+ * counted. Assuming a valid handle and a successful allocation, pinning can
+ * never fail.
+ *
+ * After the memory buffer is done being used, it should be unpinned. Unpinning
+ * never fails. Any unpinned memory is free to be moved to any location which
+ * satisfies the current properties in the handle. Drivers are strongly
+ * encouraged to unpin memory when they reach a quiescent state. It is not
+ * unreasonable to have a goal that all memory buffers (with the possible
+ * exception of memory being continuously scanned out by the display) be
+ * unpinned when the system is idle.
+ *
+ * The NvRmMemPin API is only one of the two ways to pin a buffer. In the case
+ * of modules that are programmed through command buffers submitted through
+ * host, it is not the preferred way to pin a buffer. The "RELOC" facility in
+ * the stream API should be used instead if possible. It is conceivable that in
+ * the distant future, the NvRmMemPin API might be removed. In such a world,
+ * all graphics modules would be expected to use the RELOC API or a similar API,
+ * and all IO modules would be expected to use zero-copy DMA directly from the
+ * application buffer using NvOsPageLock.
+ *
+ * Some properties of a buffer can be changed at any point in its handle's
+ * lifetime. Properties that are changed while a memory buffer is pinned will
+ * have no effect until the memory is unpinned.
+ *
+ * After you are done with a memory buffer, you must free its handle. This
+ * automatically unpins the memory (if necessary) and frees the storage (if any)
+ * associated with it.
+ *
+ * @ingroup nvrm_memmgr
+ * @{
+ */
+
+
+/**
+ * A type-safe handle for a memory buffer.
+ */
+
+typedef struct NvRmMemRec *NvRmMemHandle;
+
+/**
+ * Define for invalid Physical address
+ */
+#define NV_RM_INVALID_PHYS_ADDRESS (0xffffffff)
+
+/**
+ * NvRm heap identifiers.
+ */
+
+typedef enum
+{
+
+ /**
+ * External (non-carveout, i.e., OS-managed) memory heap.
+ */
+ NvRmHeap_External = 1,
+
+ /**
+ * GART memory heap. The GART heap is really an alias for the External
+ * heap. All GART allocations will come out of the External heap, but
+ * additionally all such allocations will be mapped in the GART. Calling
+ * NvRmMemGetAddress() on a buffer allocated in the GART heap will return
+ * the GART address, not the underlying memory address.
+ */
+ NvRmHeap_GART,
+
+ /**
+ * Carve-out memory heap within external memory.
+ */
+ NvRmHeap_ExternalCarveOut,
+
+ /**
+ * IRAM memory heap.
+ */
+ NvRmHeap_IRam,
+ NvRmHeap_Num,
+ NvRmHeap_Force32 = 0x7FFFFFFF
+} NvRmHeap;
+
+/**
+ * NvRm heap statistics. See NvRmMemGetStat() for further details.
+ */
+
+typedef enum
+{
+
+ /**
+ * Total number of bytes reserved for the carveout heap.
+ */
+ NvRmMemStat_TotalCarveout = 1,
+
+ /**
+ * Number of bytes used in the carveout heap.
+ */
+ NvRmMemStat_UsedCarveout,
+
+ /**
+ * Size of the largest free block in the carveout heap.
+ * Size can be less than the difference of total and
+ * used memory.
+ */
+ NvRmMemStat_LargestFreeCarveoutBlock,
+
+ /**
+ * Total number of bytes in the GART heap.
+ */
+ NvRmMemStat_TotalGart,
+
+ /**
+ * Number of bytes reserved from the GART heap.
+ */
+ NvRmMemStat_UsedGart,
+
+ /**
+ * Size of the largest free block in GART heap. Size can be
+ * less than the difference of total and used memory.
+ */
+ NvRmMemStat_LargestFreeGartBlock,
+ NvRmMemStat_Num,
+ NvRmMemStat_Force32 = 0x7FFFFFFF
+} NvRmMemStat;
+
+/**
+ * Allocates a memory handle that can be used to specify a memory allocation
+ * request and manipulate the resulting storage.
+ *
+ * @see NvRmMemHandleFree()
+ *
+ * @param hDevice An RM device handle.
+ * @param phMem A pointer to an opaque handle that will be filled in with the
+ * new memory handle.
+ * @param Size Specifies the requested size of the memory buffer in bytes.
+ *
+ * @retval NvSuccess Indicates the memory handle was successfully allocated.
+ * @retval NvError_InsufficientMemory Insufficient system memory exists to
+ * allocate the memory handle.
+ */
+
+ NvError NvRmMemHandleCreate(
+ NvRmDeviceHandle hDevice,
+ NvRmMemHandle * phMem,
+ NvU32 Size );
+
+/**
+ * Looks up a pre-existing memory handle whose allocation was preserved through
+ * the boot process.
+ *
+ * Looking up a memory handle is a one-time event. Once a preserved handle
+ * has been successfully looked up, it may not be looked up again. Memory
+ * handles created with this mechanism behave identically to memory handles
+ * created through NvRmMemHandleCreate, including freeing the allocation with
+ * NvRmMemHandleFree.
+ *
+ * @param hDevice An RM device handle.
+ * @param Key The key value that was returned by the earlier call to
+ * @see NvRmMemHandlePreserveHandle.
+ * @param phMem A pointer to an opaque handle that will be filled in with the
+ * queried memory handle, if a preserved handle matching the key is found.
+ *
+ * @retval NvSuccess Indicates that the key was found and the memory handle
+ * was successfully created.
+ * @retval NvError_InsufficientMemory Insufficient system memory was available
+ * to perform the operation, or if no memory handle exists for the specified
+ * Key.
+ */
+
+ NvError NvRmMemHandleClaimPreservedHandle(
+ NvRmDeviceHandle hDevice,
+ NvU32 Key,
+ NvRmMemHandle * phMem );
+
+/**
+ * Adds a memory handle to the set of memory handles which will be preserved
+ * between the current OS context and a subsequent OS context.
+ *
+ * @param hMem The handle which will be marked for preservation
+ * @param Key A key which can be used to claim the memory handle in a
+ * different OS context.
+ *
+ * @retval NvSuccess Indicates that the memory handle will be preserved
+ * @retval NvError_InsufficientMemory Insufficient system or BootArg memory
+ * was available to mark the memory handle as preserved.
+ */
+
+ NvError NvRmMemHandlePreserveHandle(
+ NvRmMemHandle hMem,
+ NvU32 * Key );
+
+/**
+ * Frees a memory handle obtained from NvRmMemHandleCreate(),
+ * or NvRmMemHandleFromId().
+ *
+ * Fully disposing of a handle requires calling this API one time, plus one
+ * time for each NvRmMemHandleFromId(). When the internal reference count of
+ * the handle reaches zero, all resources for the handle will be released, even
+ * if the memory is marked as pinned and/or mapped. It is the caller's
+ * responsibility to ensure mappings are released before calling this API.
+ *
+ * When the last handle is closed, the associated storage will be implicitly
+ * unpinned and freed.
+ *
+ * This API cannot fail.
+ *
+ * @see NvRmMemHandleCreate()
+ * @see NvRmMemHandleFromId()
+ *
+ * @param hMem A previously allocated memory handle. If hMem is NULL, this API
+ * has no effect.
+ */
+
+ void NvRmMemHandleFree(
+ NvRmMemHandle hMem );
+
+/**
+ * Allocate storage for a memory handle. The storage must satisfy:
+ * 1) all specified properties in the hMem handle
+ * 2) the alignment parameters
+ *
+ * Memory allocated by this API is intended to be used by modules which
+ * control hardware devices such as media accelerators or I/O controllers.
+ *
+ * The memory will initially be in an unpinned state.
+ *
+ * Assert encountered in debug mode if alignment was not a power of two,
+ * or coherency is not one of NvOsMemAttribute_Uncached,
+ * NvOsMemAttribute_WriteBack or NvOsMemAttribute_WriteCombined.
+ *
+ * @see NvRmMemPin()
+ *
+ * @param hMem The memory handle to allocate storage for.
+ * @param Heaps[] An array of heap enumerants that indicate which heaps the
+ * memory buffer is allowed to live in. When a memory buffer is requested
+ * to be allocated or needs to be moved, Heaps[0] will be the first choice
+ * to allocate from or move to, Heaps[1] will be the second choice, and so
+ * on until the end of the array.
+ * @param NumHeaps The size of the Heaps[] array. If NumHeaps is zero, then
+ * Heaps must also be NULL, and the RM will select a default list of heaps
+ * on the client's behalf.
+ * @param Alignment Specifies the requested alignment of the buffer, measured in
+ * bytes. Must be a power of two.
+ * @param Coherency Specifies the cache coherency mode desired if the memory
+ * is ever mapped.
+ *
+ * @retval NvSuccess Indicates the memory buffer was successfully
+ * allocated.
+ * @retval NvError_InsufficientMemory Insufficient memory exists that
+ * satisfies the specified memory handle properties and API parameters.
+ * @retval NvError_AlreadyAllocated hMem already has a memory buffer
+ * allocated.
+ */
+
+ NvError NvRmMemAlloc(
+ NvRmMemHandle hMem,
+ const NvRmHeap * Heaps,
+ NvU32 NumHeaps,
+ NvU32 Alignment,
+ NvOsMemAttribute Coherency );
+
+/**
+ * Attempts to lock down a piece of previously allocated memory. By default
+ * memory is "movable" until it is pinned -- the RM is free to relocate it from
+ * one address or heap to another at any time for any reason (say, to defragment
+ * a heap). This function can be called to prevent the RM from moving the
+ * memory.
+ *
+ * While a memory buffer is pinned, its physical address can safely be queried
+ * with NvRmMemGetAddress().
+ *
+ * This API always succeeds.
+ *
+ * Pins are reference counted, so the memory will remain pinned until all Pin
+ * calls have had a matching Unpin call.
+ *
+ * Pinning and mapping a memory buffer are completely orthogonal. It is not
+ * necessary to pin a buffer before mapping it. Mapping a buffer does not imply
+ * that it is pinned.
+ *
+ * @see NvRmMemGetAddress()
+ * @see NvRmMemUnpin()
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate,
+ * NvRmMemHandleFromId.
+ *
+ * @returns The physical address of the first byte in the specified memory
+ * handle's storage. If the memory is mapped through the GART, the
+ * GART address will be returned, not the address of the underlying memory.
+ */
+
+ NvU32 NvRmMemPin(
+ NvRmMemHandle hMem );
+
+ /**
+ * A multiple handle version of NvRmMemPin to reduce kernel trap overhead.
+ *
+ * @see NvRmMemPin
+ *
+ * @param hMems An array of memory handles to pin
+ * @param Addrs An array of addresses (the result of each pin)
+ * @param Count The number of handles and addresses
+ */
+
+ void NvRmMemPinMult(
+ NvRmMemHandle * hMems,
+ NvU32 * Addrs,
+ NvU32 Count );
+
+/**
+ * Retrieves a physical address for an hMem handle and an offset into that
+ * handle's memory buffer.
+ *
+ * If the memory referred to by hMem is not pinned, the return value is
+ * undefined, and an assert will fire in a debug build.
+ *
+ * @see NvRmMemPin()
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset The offset into the memory buffer for which the
+ * address is desired.
+ *
+ * @returns The physical address of the specified byte within the specified
+ * memory handle's storage. If the memory is mapped through the GART, the
+ * GART address will be returned, not the address of the underlying memory.
+ */
+
+ NvU32 NvRmMemGetAddress(
+ NvRmMemHandle hMem,
+ NvU32 Offset );
+
+/**
+ * Unpins a memory buffer so that it is once again free to be moved. Pins are
+ * reference counted, so the memory will not become movable until all Pin calls
+ * have had a matching Unpin call.
+ *
+ * If the pin count is already zero when this API is called, the behavior is
+ * undefined, and an assert will fire in a debug build.
+ *
+ * This API cannot fail.
+ *
+ * @see NvRmMemPin()
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * If hMem is NULL, this API will do nothing.
+ */
+
+ void NvRmMemUnpin(
+ NvRmMemHandle hMem );
+
+ /**
+ * A multiple handle version of NvRmMemUnpin to reduce kernel trap overhead.
+ *
+ * @see NvRmMemPin
+ *
+ * @param hMems An array of memory handles to unpin
+ * @param Count The number of handles and addresses
+ */
+
+ void NvRmMemUnpinMult(
+ NvRmMemHandle * hMems,
+ NvU32 Count );
+
+/**
+ * Attempts to map a memory buffer into the process's virtual address space.
+ *
+ * It is recommended that mappings be short-lived as some systems have a limited
+ * number of concurrent mappings that can be supported, or because virtual
+ * address space may be scarce.
+ *
+ * It is legal to have multiple concurrent mappings of a single memory buffer.
+ *
+ * Pinning and mapping a memory buffer are completely orthogonal. It is not
+ * necessary to pin a buffer before mapping it. Mapping a buffer does not imply
+ * that it is pinned.
+ *
+ * There is no guarantee that the mapping will succeed. For example, on some
+ * operating systems, the OS's security mechanisms make it impossible for
+ * untrusted applications to map certain types of memory. A mapping might also
+ * fail due to exhaustion of memory or virtual address space. Therefore, you
+ * must implement code paths that can handle mapping failures. For example, if
+ * the mapping fails, you may want to fall back to using NvRmMemRead() and
+ * NvRmMemWrite(). Alternatively, you may want to consider avoiding the use of
+ * this API altogether, unless there is a compelling reason why you need
+ * mappings.
+ *
+ * @see NvRmMemUnmap()
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset within the memory buffer to start the map at.
+ * @param Size Size in bytes of mapping requested. Must be greater than 0.
+ * @param Flags Special flags -- use NVOS_MEM_* (see nvos.h for details)
+ * @param pVirtAddr If the mapping is successful, provides a virtual
+ * address through which the memory buffer can be accessed.
+ *
+ * @retval NvSuccess Indicates that the memory was successfully mapped.
+ * @retval NvError_InsufficientMemory The mapping was unsuccessful.
+ * This can occur if it is impossible to map the memory, or if offset+size
+ * is greater than the size of the buffer referred to by hMem.
+ * @retval NvError_NotSupported Mapping not allowed (e.g., for GART heap)
+ */
+
+NvError
+NvRmMemMap(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 Size,
+ NvU32 Flags,
+ void **pVirtAddr);
+
+/**
+ * Unmaps a memory buffer from the process's virtual address space. This API
+ * cannot fail.
+ *
+ * If hMem is NULL, this API will do nothing.
+ * If pVirtAddr is NULL, this API will do nothing.
+ *
+ * @see NvRmMemMap()
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param pVirtAddr The virtual address returned by a previous call to
+ * NvRmMemMap with hMem.
+ * @param Size The size in bytes of the mapped region. Must be the same as the
+ * Size value originally passed to NvRmMemMap.
+ */
+
+void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 Size);
+
+/**
+ * Reads 8 bits of data from a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ *
+ * @returns The value read from the memory location.
+ */
+
+NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset);
+
+/**
+ * Reads 16 bits of data from a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * Must be a multiple of 2.
+ *
+ * @returns The value read from the memory location.
+ */
+
+NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset);
+
+/**
+ * Reads 32 bits of data from a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * Must be a multiple of 4.
+ *
+ * @returns The value read from the memory location.
+ */
+
+NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset);
+
+/**
+ * Writes 8 bits of data to a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param Data The data to write to the memory location.
+ */
+
+void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data);
+
+/**
+ * Writes 16 bits of data to a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * Must be a multiple of 2.
+ * @param Data The data to write to the memory location.
+ */
+
+void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data);
+
+/**
+ * Writes 32 bits of data to a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * Must be a multiple of 4.
+ * @param Data The data to write to the memory location.
+ */
+
+void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data);
+
+/**
+ * Reads a block of data from a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param pDst The buffer where the data should be placed.
+ * May be arbitrarily aligned -- need not be located at a word boundary.
+ * @param Size The number of bytes of data to be read.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ */
+void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size);
+
+/**
+ * Writes a block of data to a buffer. This API cannot fail.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param pSrc The buffer to obtain the data from.
+ * May be arbitrarily aligned -- need not be located at a word boundary.
+ * @param Size The number of bytes of data to be written.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ */
+void NvRmMemWrite(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ const void *pSrc,
+ NvU32 Size);
+
+/**
+ * Reads a strided series of blocks of data from a buffer. This API cannot
+ * fail.
+ *
+ * The total number of bytes copied is Count*ElementSize.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param SrcStride The number of bytes separating each source element.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param pDst The buffer where the data should be placed.
+ * May be arbitrarily aligned -- need not be located at a word boundary.
+ * @param DstStride The number of bytes separating each destination element.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param ElementSize The number of bytes in each element.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ * @param Count The number of destination elements.
+ */
+void NvRmMemReadStrided(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 SrcStride,
+ void *pDst,
+ NvU32 DstStride,
+ NvU32 ElementSize,
+ NvU32 Count);
+
+/**
+ * Writes a strided series of blocks of data to a buffer. This API cannot
+ * fail.
+ *
+ * The total number of bytes copied is Count*ElementSize.
+ *
+ * If hMem refers to an unallocated memory buffer, this function's behavior is
+ * undefined and an assert will trigger in a debug build.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate.
+ * @param Offset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param DstStride The number of bytes separating each destination element.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param pSrc The buffer to obtain the data from.
+ * May be arbitrarily aligned -- need not be located at a word boundary.
+ * @param SrcStride The number of bytes separating each source element.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param ElementSize The number of bytes in each element.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ * @param Count The number of source elements.
+ */
+void NvRmMemWriteStrided(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 DstStride,
+ const void *pSrc,
+ NvU32 SrcStride,
+ NvU32 ElementSize,
+ NvU32 Count);
+
+/**
+ * Moves (copies) a block of data to a different (or the same) hMem. This
+ * API cannot fail. Overlapping copies are supported.
+ *
+ * NOTE: While easy to use, this is NOT the fastest way to copy memory. Using
+ * the 2D engine to perform a blit can be much faster than this function.
+ *
+ * If hDstMem or hSrcMem refers to an unallocated memory buffer, this function's
+ * behavior is undefined and an assert will trigger in a debug build.
+ *
+ * @param hDstMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param DstOffset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param hSrcMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param SrcOffset Byte offset relative to the base of hMem.
+ * May be arbitrarily aligned -- need not be a multiple of 2 or 4.
+ * @param Size The number of bytes of data to be copied from hSrcMem to hDstMem.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ */
+
+ void NvRmMemMove(
+ NvRmMemHandle hDstMem,
+ NvU32 DstOffset,
+ NvRmMemHandle hSrcMem,
+ NvU32 SrcOffset,
+ NvU32 Size );
+
+/**
+ * Optionally writes back and/or invalidates a range of the memory from the
+ * data cache, if applicable. Does nothing for memory that was not allocated
+ * as cached. Memory must be mapped into the calling process.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param pMapping Starting address (must be within the mapped region of the
+ * hMem) of the range to maintain.
+ * @param Size The number of bytes in the range to maintain.
+ * May be arbitrarily sized -- need not be a multiple of 2 or 4.
+ * @param WriteBack If NV_TRUE, dirty cache lines in the range are written
+ * back to memory.
+ * @param Invalidate If NV_TRUE, cache lines in the range are invalidated.
+ */
+
+void NvRmMemCacheMaint(
+ NvRmMemHandle hMem,
+ void *pMapping,
+ NvU32 Size,
+ NvBool WriteBack,
+ NvBool Invalidate);
+
+/**
+ * Get the size of the buffer associated with a memory handle.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ *
+ * @returns Size in bytes of memory allocated for this handle.
+ */
+
+ NvU32 NvRmMemGetSize(
+ NvRmMemHandle hMem );
+
+/**
+ * Get the alignment of the buffer associated with a memory handle.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ *
+ * @returns Alignment in bytes of memory allocated for this handle.
+ */
+
+ NvU32 NvRmMemGetAlignment(
+ NvRmMemHandle hMem );
+
+/**
+ * Queries the maximum cache line size (in bytes) for all of the caches
+ * L1 and L2 in the system
+ *
+ * @returns The largest cache line size of the system
+ */
+
+ NvU32 NvRmMemGetCacheLineSize(
+ void );
+
+/**
+ * Queries for the heap type associated with a given memory handle. Also
+ * returns base physical address for the buffer, if the type is carveout or
+ * GART. For External type, this parameter does not make sense.
+ *
+ * @param hMem A memory handle returned from NvRmMemHandleCreate/FromId.
+ * @param BasePhysAddr Output parameter receives the physical address of the
+ * buffer.
+ *
+ * @returns The heap type allocated for this memory handle.
+ */
+
+ NvRmHeap NvRmMemGetHeapType(
+ NvRmMemHandle hMem,
+ NvU32 * BasePhysAddr );
+
+/**
+ * Dynamically allocates memory, on CPU this will result in a call to
+ * NvOsAlloc and on AVP, mem APIs are used to allocate memory.
+ * @param Size The memory size to be allocated.
+ * @returns Pointer to the allocated buffer.
+ */
+void* NvRmHostAlloc(size_t Size);
+
+/**
+ * Frees a dynamic memory allocation, previously allocated using NvRmHostAlloc.
+ *
+ * @param ptr The pointer to the buffer which needs to be deallocated.
+ */
+void NvRmHostFree(void* ptr);
+
+/**
+ * This is generally not a publicly available function. It is only available
+ * on WinCE to the nvrm device driver. Attempting to use this function will
+ * result in a linker error, you should use NvRmMemMap instead, which will do
+ * the "right" thing for all platforms.
+ *
+ * Under WinCE NvRmMemMap has a custom marshaller, the custom marshaller will
+ * do the following:
+ * - Allocate virtual space
+ * - ioctl to the nvrm driver
+ * - nvrm driver will create a mapping from the allocated buffer to
+ * the newly allocated virtual space.
+ */
+NvError NvRmMemMapIntoCallerPtr(
+ NvRmMemHandle hMem,
+ void *pCallerPtr,
+ NvU32 Offset,
+ NvU32 Size);
+
+/**
+ * Create a unique identifier which can be used from any process/processor
+ * to generate a new memory handle. This can be used to share a memory handle
+ * between processes, or from AVP and CPU.
+ *
+ * Typical usage would be
+ * GetId
+ * Pass Id to client process/processor
+ * Client calls: NvRmMemHandleFromId
+ *
+ * See Also NvRmMemHandleFromId
+ *
+ * NOTE: Getting an id _does not_ increment the reference count of the
+ * memory handle. You must be sure that whichever process/processor
+ * that is passed an Id calls @NvRmMemHandleFromId@ before you free
+ * a handle.
+ *
+ * @param hMem The memory handle to retrieve the id for.
+ * @returns a unique id that identifies the memory handle.
+ */
+
+ NvU32 NvRmMemGetId(
+ NvRmMemHandle hMem );
+
+/**
+ * Create a new memory handle, which refers to the memory handle identified
+ * by @id@. This function will increment the reference count on the handle.
+ *
+ * See Also NvRmMemGetId
+ *
+ * @param id value that refers to a memory handle, returned from NvRmMemGetId
+ * @param hMem The newly created memory handle
+ * @returns NvSuccess if a unique id is created.
+ */
+
+ NvError NvRmMemHandleFromId(
+ NvU32 id,
+ NvRmMemHandle * hMem );
+
+/**
+ * Get a memory statistics value.
+ *
+ * Querying values may have an effect on system performance and may include
+ * processing, like heap traversal.
+ *
+ * @param Stat NvRmMemStat value that chooses the value to return.
+ * @param Result Result, if the call was successful. Otherwise value
+ * is not touched.
+ * @returns NvSuccess on success, NvError_BadParameter if Stat is
+ * not a valid value, NvError_NotSupported if the Stat is
+ * not available for some reason, or
+ * NvError_InsufficientMemory.
+ */
+
+ NvError NvRmMemGetStat(
+ NvRmMemStat Stat,
+ NvS32 * Result );
+
+#define NVRM_MEM_CHECK_ID 0
+#define NVRM_MEM_TRACE 0
+#if NVRM_MEM_TRACE
+#ifndef NV_IDL_IS_STUB
+#ifndef NV_IDL_IS_DISPATCH
+#define NvRmMemHandleCreate(d,m,s) \
+ NvRmMemHandleCreateTrace(d,m,s,__FILE__,__LINE__)
+#define NvRmMemHandleFree(m) \
+ NvRmMemHandleFreeTrace(m,__FILE__,__LINE__)
+#define NvRmMemGetId(m) \
+ NvRmMemGetIdTrace(m,__FILE__,__LINE__)
+#define NvRmMemHandleFromId(i,m) \
+ NvRmMemHandleFromIdTrace(i,m,__FILE__,__LINE__)
+
+static NV_INLINE NvError NvRmMemHandleCreateTrace(
+ NvRmDeviceHandle hDevice,
+ NvRmMemHandle * phMem,
+ NvU32 Size,
+ const char *file,
+ NvU32 line)
+{
+ NvError err;
+ err = (NvRmMemHandleCreate)(hDevice, phMem, Size);
+ NvOsDebugPrintf("RMMEMTRACE: Create %08x at %s:%d %s\n",
+ (int)*phMem,
+ file,
+ line,
+ err?"FAILED":"");
+ return err;
+}
+
+static NV_INLINE void NvRmMemHandleFreeTrace(
+ NvRmMemHandle hMem,
+ const char *file,
+ NvU32 line)
+{
+ NvOsDebugPrintf("RMMEMTRACE: Free %08x at %s:%d\n",
+ (int)hMem,
+ file,
+ line);
+ (NvRmMemHandleFree)(hMem);
+}
+
+static NV_INLINE NvU32 NvRmMemGetIdTrace(
+ NvRmMemHandle hMem,
+ const char *file,
+ NvU32 line)
+{
+ NvOsDebugPrintf("RMMEMTRACE: GetId %08x at %s:%d\n",
+ (int)hMem,
+ file,
+ line);
+ return (NvRmMemGetId)(hMem);
+}
+
+static NV_INLINE NvError NvRmMemHandleFromIdTrace(
+ NvU32 id,
+ NvRmMemHandle * hMem,
+ const char *file,
+ NvU32 line)
+{
+ NvOsDebugPrintf("RMMEMTRACE: FromId %08x at %s:%d\n",
+ id,
+ file,
+ line);
+ return (NvRmMemHandleFromId)(id,hMem);
+}
+
+#endif // NV_IDL_IS_DISPATCH
+#endif // NV_IDL_IS_STUB
+#endif // NVRM_MEM_TRACE
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_minikernel.h b/arch/arm/mach-tegra/include/nvrm_minikernel.h
new file mode 100644
index 000000000000..79b198b3f7d3
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_minikernel.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_MINIKERNEL_H
+#define INCLUDED_NVRM_MINIKERNEL_H
+
+#include "nvrm_init.h"
+
+/**
+ * Called by the secure OS code to initialize the Rm. Usage and
+ * implementation of this API is platform specific.
+ *
+ * This API should not be called by the non-secure clients of the Rm.
+ *
+ * This API is guaranteed to succeed on the supported platforms.
+ *
+ * @param pHandle the RM handle is stored here.
+ */
+void NvRmBasicInit( NvRmDeviceHandle *pHandle );
+
+/**
+ * Closes the Resource Manager for secure os.
+ *
+ * @param hDevice The RM handle. If hDevice is NULL, this API has no effect.
+ */
+void NvRmBasicClose( NvRmDeviceHandle hDevice );
+
+#endif // INCLUDED_NVRM_MINIKERNEL_H
diff --git a/arch/arm/mach-tegra/include/nvrm_module.h b/arch/arm/mach-tegra/include/nvrm_module.h
new file mode 100644
index 000000000000..570ed08dafba
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_module.h
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_module_H
+#define INCLUDED_nvrm_module_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+/**
+ * SOC hardware controller class identifiers.
+ */
+
+typedef enum
+{
+
+ /// Specifies an invalid module ID.
+ NvRmModuleID_Invalid = 0,
+
+ /// Specifies the application processor.
+ NvRmModuleID_Cpu,
+
+ /// Specifies the Audio Video Processor
+ NvRmModuleID_Avp,
+
+ /// Specifies the Vector Co Processor
+ NvRmModuleID_Vcp,
+
+ /// Specifies the display controller.
+ NvRmModuleID_Display,
+
+ /// Specifies the IDE controller.
+ NvRmModuleID_Ide,
+
+ /// Graphics Host
+ NvRmModuleID_GraphicsHost,
+
+ /// Specifies 2D graphics controller
+ NvRmModuleID_2D,
+
+ /// Specifies 3D graphics controller
+ NvRmModuleID_3D,
+
+ /// Specifies VG graphics controller
+ NvRmModuleID_VG,
+
+ /// NV epp (encoder pre-processor)
+ NvRmModuleID_Epp,
+
+ /// NV isp (image signal processor)
+ NvRmModuleID_Isp,
+
+ /// NV vi (video input)
+ NvRmModuleID_Vi,
+
+ /// Specifies USB2 OTG controller
+ NvRmModuleID_Usb2Otg,
+
+ /// Specifies the I2S controller.
+ NvRmModuleID_I2s,
+
+ /// Specifies the Pulse Width Modulator controller.
+ NvRmModuleID_Pwm,
+
+ /// Specifies the Three Wire controller.
+ NvRmModuleID_Twc,
+
+ /// HSMMC controller
+ NvRmModuleID_Hsmmc,
+
+ /// Specifies SDIO controller
+ NvRmModuleID_Sdio,
+
+ /// Specifies the NAND controller.
+ NvRmModuleID_Nand,
+
+ /// Specifies the I2C controller.
+ NvRmModuleID_I2c,
+
+ /// Specifies the Sony Phillips Digital Interface Format controller.
+ NvRmModuleID_Spdif,
+
+ /// Specifies the %UART controller.
+ NvRmModuleID_Uart,
+
+ /// Specifies the timer controller.
+ NvRmModuleID_Timer,
+
+ /// Specifies the timer controller microsecond counter.
+ NvRmModuleID_TimerUs,
+
+ /// Real time clock controller.
+ NvRmModuleID_Rtc,
+
+ /// Specifies the Audio Codec 97 controller.
+ NvRmModuleID_Ac97,
+
+ /// Specifies Audio Bit Stream Engine
+ NvRmModuleID_BseA,
+
+ /// Specifies Video decoder
+ NvRmModuleID_Vde,
+
+ /// Specifies Video encoder (Motion Picture Encoder)
+ NvRmModuleID_Mpe,
+
+ /// Specifies Camera Serial Interface
+ NvRmModuleID_Csi,
+
+ /// Specifies High-Bandwidth Digital Content Protection interface
+ NvRmModuleID_Hdcp,
+
+ /// Specifies High definition Multimedia Interface
+ NvRmModuleID_Hdmi,
+
+ /// Specifies MIPI baseband controller
+ NvRmModuleID_Mipi,
+
+ /// Specifies TV out controller
+ NvRmModuleID_Tvo,
+
+ /// Specifies Serial Display
+ NvRmModuleID_Dsi,
+
+ /// Specifies Dynamic Voltage Controller
+ NvRmModuleID_Dvc,
+
+ /// Specifies the eXtended I/O controller.
+ NvRmModuleID_Xio,
+
+ /// SPI controller
+ NvRmModuleID_Spi,
+
+ /// Specifies SLink controller
+ NvRmModuleID_Slink,
+
+ /// Specifies FUSE controller
+ NvRmModuleID_Fuse,
+
+ /// Specifies KFUSE controller
+ NvRmModuleID_KFuse,
+
+ /// Specifies EthernetMIO controller
+ NvRmModuleID_Mio,
+
+ /// Specifies keyboard controller
+ NvRmModuleID_Kbc,
+
+ /// Specifies Pmif controller
+ NvRmModuleID_Pmif,
+
+ /// Specifies Unified Command Queue
+ NvRmModuleID_Ucq,
+
+ /// Specifies Event controller
+ NvRmModuleID_EventCtrl,
+
+ /// Specifies Flow controller
+ NvRmModuleID_FlowCtrl,
+
+ /// Resource Semaphore
+ NvRmModuleID_ResourceSema,
+
+ /// Arbitration Semaphore
+ NvRmModuleID_ArbitrationSema,
+
+ /// Specifies Arbitration Priority
+ NvRmModuleID_ArbPriority,
+
+ /// Specifies Cache Memory Controller
+ NvRmModuleID_CacheMemCtrl,
+
+ /// Specifies very fast infra red controller
+ NvRmModuleID_Vfir,
+
+ /// Specifies Exception Vector
+ NvRmModuleID_ExceptionVector,
+
+ /// Specifies Boot Strap Controller
+ NvRmModuleID_BootStrap,
+
+ /// Specifies System Statistics Monitor controller
+ NvRmModuleID_SysStatMonitor,
+
+ /// Specifies System
+ NvRmModuleID_Cdev,
+
+ /// Misc module ID which contains registers for Pinmux/DAP control etc.
+ NvRmModuleID_Misc,
+
+ // PCIE Device attached to AP20
+ NvRmModuleID_PcieDevice,
+
+ // One-wire interface controller
+ NvRmModuleID_OneWire,
+
+ // Sync NOR controller
+ NvRmModuleID_SyncNor,
+
+ // NOR Memory aperture
+ NvRmModuleID_Nor,
+
+ // AVP UCQ module.
+ NvRmModuleID_AvpUcq,
+
+ /// clock and reset controller
+ NvRmPrivModuleID_ClockAndReset,
+
+ /// interrupt controller
+ NvRmPrivModuleID_Interrupt,
+
+ /// interrupt controller Arbitration Semaphore grant registers
+ NvRmPrivModuleID_InterruptArbGnt,
+
+ /// interrupt controller DMA Tx/Rx DRQ registers
+ NvRmPrivModuleID_InterruptDrq,
+
+ /// interrupt controller special SW interrupt
+ NvRmPrivModuleID_InterruptSw,
+
+ /// interrupt controller special CPU interrupt
+ NvRmPrivModuleID_InterruptCpu,
+
+ /// Apb Dma controller
+ NvRmPrivModuleID_ApbDma,
+
+ /// Apb Dma Channel
+ NvRmPrivModuleID_ApbDmaChannel,
+
+ /// Gpio controller
+ NvRmPrivModuleID_Gpio,
+
+ /// Pin-Mux Controller
+ NvRmPrivModuleID_PinMux,
+
+ /// memory configuration
+ NvRmPrivModuleID_Mselect,
+
+ /// memory controller (internal memory and memory arbitration)
+ NvRmPrivModuleID_MemoryController,
+
+ /// external memory (ddr ram, etc.)
+ NvRmPrivModuleID_ExternalMemoryController,
+
+ /// Processor Id
+ NvRmPrivModuleID_ProcId,
+
+ /// Entire System (used for system reset)
+ NvRmPrivModuleID_System,
+
+ /* CC device id (not sure what it actually does, but it is needed to
+ * set the mem_init_done bit so that memory works).
+ */
+ NvRmPrivModuleID_CC,
+
+ /// AHB Arbitration Control
+ NvRmPrivModuleID_Ahb_Arb_Ctrl,
+
+ /// AHB Gizmo Control
+ NvRmPrivModuleID_Ahb_Gizmo_Ctrl,
+
+ /// External memory
+ NvRmPrivModuleID_ExternalMemory,
+
+ /// Internal memory
+ NvRmPrivModuleID_InternalMemory,
+
+ /// TCRAM
+ NvRmPrivModuleID_Tcram,
+
+ /// IRAM
+ NvRmPrivModuleID_Iram,
+
+ /// GART
+ NvRmPrivModuleID_Gart,
+
+ /// MIO/EXIO
+ NvRmPrivModuleID_Mio_Exio,
+
+ /* External PMU */
+ NvRmPrivModuleID_PmuExt,
+
+ /* One module ID for all peripherals which includes cache controller,
+ * SCU and interrupt controller */
+ NvRmPrivModuleID_ArmPerif,
+ NvRmPrivModuleID_ArmInterruptctrl,
+
+ /* PCIE Root Port internally is made up of 3 major blocks. These 3 blocks
+ * have separate reset and clock domains, so the driver treats them as three separate modules:
+ *
+ * AFI is the wrapper on the top of the PCI core.
+ * PCIe refers to the core PCIe state machine module.
+ * PcieXclk refers to the transmit/receive logic which runs at different
+ * clock and have different reset.
+ * */
+ NvRmPrivModuleID_Afi,
+ NvRmPrivModuleID_Pcie,
+ NvRmPrivModuleID_PcieXclk,
+
+ /* PL310 */
+ NvRmPrivModuleID_Pl310,
+
+ /*
+ * AHB re-map aperture seen from AVP. Use this aperture for AVP to have
+ * uncached access to SDRAM.
+ */
+ NvRmPrivModuleID_AhbRemap,
+ NvRmModuleID_Num,
+ NvRmModuleID_Force32 = 0x7FFFFFFF
+} NvRmModuleID;
+
+/* FIXME
+ * Hack to make the existing drivers work.
+ * NvRmPriv* should be renamed to NvRm*
+ */
+#define NvRmPrivModuleID_Num NvRmModuleID_Num
+
+/**
+ * Multiple module instances are handled by packing the instance number into
+ * the high bits of the module id. This avoids ponderous apis with both
+ * module ids and instance numbers.
+ */
+
+/**
+ * Create a module id with a given instance.
+ */
+#define NVRM_MODULE_ID( id, instance ) \
+ ((NvRmModuleID)( (instance) << 16 | id ) )
+
+/**
+ * Get the actual module id.
+ */
+#define NVRM_MODULE_ID_MODULE( id ) ((id) & 0xFFFF)
+
+/**
+ * Get the instance number of the module id.
+ */
+#define NVRM_MODULE_ID_INSTANCE( id ) (((id) >> 16) & 0xFFFF)
+
+/**
+ * Module Information structure
+ */
+
+typedef struct NvRmModuleInfoRec
+{
+ NvU32 Instance;
+ NvRmPhysAddr BaseAddress;
+ NvU32 Length;
+} NvRmModuleInfo;
+
+/**
+ * Returns list of available module instances and their information.
+ *
+ * @param hDevice The RM device handle
+ * @param module The module for which to retrieve instance information.
+ * @param pNum Unsigned integer indicating the number of module information
+ * structures in the array pModuleInfo.
+ * @param pModuleInfo A pointer to an array of module information structure,
+ * where the size of array is determined by the value in pNum.
+ *
+ * @retval NvSuccess If successful, or the appropriate error.
+ */
+
+ NvError NvRmModuleGetModuleInfo(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID module,
+ NvU32 * pNum,
+ NvRmModuleInfo * pModuleInfo );
+
+/**
+ * Returns a physical address associated with a hardware module.
+ * (To be deprecated and replaced by NvRmModuleGetModuleInfo)
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param Module the module for which to get addresses.
+ * @param pBaseAddress a pointer to the beginning of the
+ * hardware register bank is stored here.
+ * @param pSize the length of the aperture in bytes is stored
+ * here.
+ */
+
+ void NvRmModuleGetBaseAddress(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID Module,
+ NvRmPhysAddr * pBaseAddress,
+ NvU32 * pSize );
+
+/**
+ * Returns the number of instances of a particular hardware module.
+ * (To be deprecated and replaced by NvRmModuleGetModuleInfo)
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param Module The module for which to get the number of instances.
+ *
+ * @returns Number of instances.
+ */
+
+ NvU32 NvRmModuleGetNumInstances(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID Module );
+
+/**
+ * Resets the module controller hardware.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param Module The module to reset
+ */
+
+ void NvRmModuleReset(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID Module );
+
+/**
+ * Resets the controller with an option to hold the controller in the reset.
+ *
+ * @param hRmDeviceHandle Rm device handle
+ * @param Module The module to be reset
+ * @param bHold If NV_TRUE, hold the module in reset; if NV_FALSE, pulse the
+ * reset.
+ *
+ * So, to keep the module in reset and do something
+ * NvRmModuleResetWithHold(hRm, ModId, NV_TRUE)
+ * ... update some registers
+ * NvRmModuleResetWithHold(hRm, ModId, NV_FALSE)
+ */
+
+ void NvRmModuleResetWithHold(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID Module,
+ NvBool bHold );
+
+/**
+ * DDK capability encapsulation. See NvRmModuleGetCapabilities().
+ */
+
+typedef struct NvRmModuleCapabilityRec
+{
+ NvU8 MajorVersion;
+ NvU8 MinorVersion;
+ NvU8 EcoLevel;
+ void* Capability;
+} NvRmModuleCapability;
+
+/**
+ * Returns a pointer to a class-specific capabilities structure.
+ *
+ * Each DDK will supply a list of NvRmCapability structures sorted by module
+ * Minor and Eco levels (assuming that no DDK supports two Major versions
+ * simultaneously). The last cap in the list that matches the hardware's
+ * version and eco level will be returned. If the current hardware's eco
+ * level is higher than the given module capability list, the last module
+ * capability with the highest eco level (the last in the list) will be
+ * returned.
+ *
+ * @param hDeviceHandle The RM device handle
+ * @param Module the target module
+ * @param pCaps Pointer to the capability list
+ * @param NumCaps The number of capabilities in the list
+ * @param Capability Out parameter: the cap that matches the current hardware
+ *
+ * Example usage:
+ *
+ * typedef struct FakeDdkCapRec
+ * {
+ * NvU32 FeatureBits;
+ * } FakeDdkCap;
+ *
+ * FakeDdkCap cap1;
+ * FakeDdkCap cap2;
+ * FakeDdkCap *cap;
+ * NvRmModuleCapability caps[] =
+ * { { 1, 0, 0, &cap1 },
+ * { 1, 1, 0, &cap2 },
+ * };
+ * cap1.FeatureBits = ...;
+ * cap2.FeatureBits = ...;
+ * err = NvRmModuleGetCapabilities( hDevice, NvRmModuleID_FakeDDK, caps, 2,
+ * (void *)&cap );
+ * ...
+ * if( cap->FeatureBits & FAKEDDK_SOME_FEATURE )
+ * {
+ * ...
+ * }
+ */
+
+ NvError NvRmModuleGetCapabilities(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmModuleID Module,
+ NvRmModuleCapability * pCaps,
+ NvU32 NumCaps,
+ void* * Capability );
+
+/**
+ * @brief Queries for the device unique ID.
+ *
+ * @pre Not callable from early boot.
+ *
+ * @param pId A pointer to an area of caller-allocated memory to hold the
+ * unique ID.
+ * @param IdSize Size, in bytes, of the caller-allocated memory pointed
+ * to by \em pId that is available to hold the unique ID.
+ * It must be at least as large as the unique ID for the
+ * call to succeed.
+ *
+ * @retval ::NvError_Success \em pId holds the unique ID; at most
+ * \em IdSize bytes were written.
+ * @retval ::NvError_BadParameter
+ * @retval ::NvError_NotSupported
+ * @retval ::NvError_InsufficientMemory
+ */
+
+ NvError NvRmQueryChipUniqueId(
+ NvRmDeviceHandle hDevHandle,
+ NvU32 IdSize,
+ void* pId );
+
+/**
+ * @brief Returns random bytes using hardware sources of entropy
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param NumBytes Number of random bytes to return in pBytes.
+ * @param pBytes Array where the random bytes should be stored
+ *
+ * @retval ::NvError_Success
+ * @retval ::NvError_BadParameter
+ * @retval ::NvError_NotSupported If no hardware entropy source is available
+ */
+
+ NvError NvRmGetRandomBytes(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 NumBytes,
+ void* pBytes );
+
+/*
+ * Module access functions below.
+ * NOTE: Rm doesn't guarantee access to all the modules as it only maps a few
+ * modules.
+ * This is not meant to be a primary mechanism to access the module registers.
+ * Clients should map their register address and access the registers.
+ */
+
+/**
+ * NV_REGR: register read from hardware.
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param offset The offset inside the aperture
+ *
+ * Note that the aperture comes from the RM's private module id enumeration,
+ * which is a superset of the public enumeration from nvrm_module.h.
+ */
+
+/**
+ * NV_REGW: register write to hardware.
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param offset The offset inside the aperture
+ * @param data The data to write
+ *
+ * see the note regarding apertures for NV_REGR.
+ */
+#define NV_REGR(rm, aperture, instance, offset) \
+ NvRegr((rm),(NvRmModuleID)(aperture),(instance),(offset))
+
+#define NV_REGW(rm, aperture, instance, offset, data) \
+ NvRegw((rm),(NvRmModuleID)(aperture),(instance),(offset),(data))
+
+
+ NvU32 NvRegr(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 offset );
+
+ void NvRegw(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 offset,
+ NvU32 data );
+
+/**
+ * NV_REGR_MULT: read multiple registers from hardware
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param num The number of registers
+ * @param offsets The register offsets
+ * @param values The register values
+ */
+
+/**
+ * NV_REGW_MULT: write multiple registers from hardware
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param num The number of registers
+ * @param offsets The register offsets
+ * @param values The register values
+ */
+
+/**
+ * NV_REGW_BLOCK: write a block of registers to hardware
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param num The number of registers
+ * @param offset The beginning register offset
+ * @param values The register values
+ */
+
+/**
+ * NV_REGR_BLOCK: read a block of registers from hardware
+ *
+ * @param rm The resource manager instance
+ * @param aperture The register aperture
+ * @param instance The module instance
+ * @param num The number of registers
+ * @param offset The beginning register offset
+ * @param values The register values
+ */
+
+#define NV_REGR_MULT(rm, aperture, instance, num, offsets, values) \
+ NvRegrm((rm),(NvRmModuleID)(aperture),(instance),(num),(offsets),(values))
+
+#define NV_REGW_MULT(rm, aperture, instance, num, offsets, values) \
+ NvRegwm((rm),(NvRmModuleID)(aperture),(instance),(num),(offsets),(values))
+
+#define NV_REGW_BLOCK(rm, aperture, instance, num, offset, values) \
+ NvRegwb((rm),(NvRmModuleID)(aperture),(instance),(num),(offset),(values))
+
+#define NV_REGR_BLOCK(rm, aperture, instance, num, offset, values) \
+ NvRegrb((rm),(NvRmModuleID)(aperture),(instance),(num),(offset),(values))
+
+ void NvRegrm(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 num,
+ const NvU32 * offsets,
+ NvU32 * values );
+
+ void NvRegwm(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 num,
+ const NvU32 * offsets,
+ const NvU32 * values );
+
+ void NvRegwb(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 num,
+ NvU32 offset,
+ const NvU32 * values );
+
+ void NvRegrb(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 num,
+ NvU32 offset,
+ NvU32 * values );
+
+#define NV_REGR08(rm, aperture, instance, offset) \
+ NvRegr08((rm),(NvRmModuleID)(aperture),(instance),(offset))
+
+#define NV_REGW08(rm, aperture, instance, offset, data) \
+ NvRegw08((rm),(NvRmModuleID)(aperture),(instance),(offset),(data))
+
+ NvU8 NvRegr08(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 offset );
+
+ void NvRegw08(
+ NvRmDeviceHandle rm,
+ NvRmModuleID aperture,
+ NvU32 instance,
+ NvU32 offset,
+ NvU8 data );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_owr.h b/arch/arm/mach-tegra/include/nvrm_owr.h
new file mode 100644
index 000000000000..65b96ff8f946
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_owr.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_owr_H
+#define INCLUDED_nvrm_owr_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_pinmux.h"
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvos.h"
+#include "nvcommon.h"
+
+/**
+ * NvRmOwrHandle is an opaque handle for the RM OWR driver.
+ */
+
+typedef struct NvRmOwrRec *NvRmOwrHandle;
+
+/**
+ * @brief Open the OWR driver. This function allocates the
+ * RM OWR handle.
+ *
+ * Assert encountered in debug mode if passed parameter is invalid.
+ *
+ * @param hDevice Handle to the Rm device which is required by Rm to acquire
+ * the resources from RM.
+ * @param instance Instance of the OWR controller to be opened. Starts from 0.
+ * @param hOwr Points to the location where the OWR handle shall be stored.
+ *
+ * @retval NvSuccess OWR driver opened successfully.
+ * @retval NvError_InsufficientMemory Indicates that function fails to allocate
+ * the memory.
+ */
+
+ NvError NvRmOwrOpen(
+ NvRmDeviceHandle hDevice,
+ NvU32 instance,
+ NvRmOwrHandle * hOwr );
+
+/**
+ * @brief Closes the OWR driver. Disables the clock and invalidates the OWR handle.
+ * This API never fails.
+ *
+ * @param hOwr A handle from NvRmOwrOpen(). If hOwr is NULL, this API does
+ * nothing.
+ */
+
+ void NvRmOwrClose(
+ NvRmOwrHandle hOwr );
+
+/**
+ * Defines OWR transaction flags.
+ */
+
+typedef enum
+{
+
+ /// OWR read the unique address of the device.
+ NvRmOwr_ReadAddress = 1,
+
+ /// OWR memory read transaction.
+ NvRmOwr_MemRead,
+
+ /// OWR memory write transaction.
+ NvRmOwr_MemWrite,
+ NvRmOwrTransactionFlags_Num,
+ NvRmOwrTransactionFlags_Force32 = 0x7FFFFFFF
+} NvRmOwrTransactionFlags;
+
+/**
+ * Defines OWR transaction info structure. Contains details of the transaction.
+ */
+
+typedef struct NvRmOwrTransactionInfoRec
+{
+
+ /// Transaction type flags. See @NvRmOwrTransactionFlags
+ NvU32 Flags;
+
+ /// Offset in the OWR device where Memory read/write operations need to be performed.
+ NvU32 Offset;
+
+ /// Number of bytes to read/write.
+ NvU32 NumBytes;
+
+ /// OWR device ROM Id. This can be zero, if there is a single OWR device on the bus.
+ NvU32 Address;
+} NvRmOwrTransactionInfo;
+
+/**
+ * @brief Does multiple OWR transactions. Each transaction can be a read or write.
+ *
+ * @param hOwr Handle to the OWR channel.
+ * @param OwrPinMap for OWR controllers which are being multiplexed across
+ * multiple pin mux configurations, this specifies which pin mux configuration
+ * should be used for the transaction. Must be 0 when the ODM pin mux query
+ * specifies a non-multiplexed configuration for the controller.
+ * @param Data Pointer to the buffer for all the required read, write transactions.
+ * @param DataLength Length of the data buffer.
+ * @param Transaction Pointer to the NvRmOwrTransactionInfo structure.
+ * See @NvRmOwrTransactionInfo
+ * @param NumOfTransactions Number of transactions
+ *
+ *
+ * @retval NvSuccess OWR Transaction succeeded.
+ * @retval NvError_NotSupported Indicates that an assumption on parameter values was violated.
+ * @retval NvError_InvalidState Indicates that the last read or write call is not
+ * completed.
+ * @retval NvError_ControllerBusy Indicates controller is presently busy with an
+ * OWR transaction.
+ */
+
+ NvError NvRmOwrTransaction(
+ NvRmOwrHandle hOwr,
+ NvU32 OwrPinMap,
+ NvU8 * Data,
+ NvU32 DataLen,
+ NvRmOwrTransactionInfo * Transaction,
+ NvU32 NumOfTransactions );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_pcie.h b/arch/arm/mach-tegra/include/nvrm_pcie.h
new file mode 100644
index 000000000000..3a3c7858d98b
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_pcie.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_pcie_H
+#define INCLUDED_nvrm_pcie_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+typedef enum
+{
+
+ // NvRm PCIE access type read
+ NvRmPcieAccessType_Read,
+
+ // NvRm PCIE access type write
+ NvRmPcieAccessType_Write,
+ NvRmPcieAccessType_Num,
+ NvRmPcieAccessType_Force32 = 0x7FFFFFFF
+} NvRmPcieAccessType;
+
+
+/** Reads or writes the config space of the PCI device.
+ *
+ * @param hDeviceHandle The Rm device handle
+ * @param bus_number Bus number on which the device is present.
+ * @param type Specifies the access type
+ * @param offset Start offset to read the configuration data
+ * @param Data Data in bytes used to read/write from/to device config space,
+ * depending on the access type.
+ * @param DataLen Specifies the length of the Data array.
+ *
+ * Returns NvSuccess or the appropriate error code.
+ */
+
+ NvError NvRmReadWriteConfigSpace(
+ NvRmDeviceHandle hDeviceHandle,
+ NvU32 bus_number,
+ NvRmPcieAccessType type,
+ NvU32 offset,
+ NvU8 * Data,
+ NvU32 DataLen );
+
+
+/** Registers a MSI handler for the device at an index.
+ *
+ * @param hDeviceHandle The Rm device handle
+ * @param function_device_bus function/device/bus tuple.
+ * @param index Msi index. Some devices support more than 1 MSI. For those
+ * devices, index value is from (0 to max-1)
+ * @param sem Semaphore which will be signalled when the MSI interrupt is
+ * triggered.
+ * @param InterruptEnable To enable or disable interrupt.
+ *
+ * Returns NvSuccess or the appropriate error code.
+ */
+
+
+ NvError NvRmRegisterPcieMSIHandler(
+ NvRmDeviceHandle hDeviceHandle,
+ NvU32 function_device_bus,
+ NvU32 index,
+ NvOsSemaphoreHandle sem,
+ NvBool InterruptEnable );
+
+ NvError NvRmRegisterPcieLegacyHandler(
+ NvRmDeviceHandle hDeviceHandle,
+ NvU32 function_device_bus,
+ NvOsSemaphoreHandle sem,
+ NvBool InterruptEnable );
+
+// PCIE address map supports 64-bit addressing. But, RM driver only supports
+// 32-bit addressing. In the future, if the device supports 64-bit addressing, one
+// can change this typedef.
+
+typedef NvU32 NvRmPciPhysAddr;
+
+/**
+ * Attempts to map the PCIe memory to the 32-bit AXI address region.
+ * Ap20 reserves only 1GB PCIe aperture. Out of that 1GB, some region is reserved for
+ * the register/config/msi access. Only 768MB is left out for the PCIe memory aperture.
+ *
+ * @param hDeviceHandle Rm device handle
+ * @param mem "Base address registers" of a PCI device.
+ *
+ * Returns the mapped AXI address. If the mapping fails, it returns 0.
+ */
+
+ NvRmPhysAddr NvRmMapPciMemory(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmPciPhysAddr mem,
+ NvU32 size );
+
+/** Unmaps the PCI to AXI address mapping
+ *
+ * @param hDeviceHandle Rm device handle
+ * @param mem AXI addresses mapped by calling NvRmMapPciMemory
+ * API.
+ */
+
+ void NvRmUnmapPciMemory(
+ NvRmDeviceHandle hDeviceHandle,
+ NvRmPhysAddr mem,
+ NvU32 size );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_pinmux.h b/arch/arm/mach-tegra/include/nvrm_pinmux.h
new file mode 100644
index 000000000000..bafe6a2655b4
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_pinmux.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_pinmux_H
+#define INCLUDED_nvrm_pinmux_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvodm_modules.h"
+
+/**
+ * For each module that has pins (an I/O module), there may be several muxing
+ * configurations. This allows a driver to select or query a particular
+ * configuration per I/O module. I/O modules may be instantiated on the
+ * chip multiple times.
+ *
+ * Certain combinations of modules configurations may not be physically
+ * possible; say that a hypothetical SPI controller configuration 3 uses pins
+ * that are shared by a hypothetical UART configuration 2. Presently, these
+ * conflicting configurations are managed via an external tool provided by
+ * SysEng, which identifies the configurations for the ODM pin-mux tables
+ * depending upon choices made by the ODM.
+ */
+
+/**
+ * Sets the module to tristate configuration.
+ * Use enable to release the pinmux. The pins will be
+ * tri-stated when not in use to save power.
+ *
+ * @param hDevice The RM instance
+ * @param RmModule The module to set
+ * @param EnableTristate NV_TRUE will tristate the specified pins, NV_FALSE will un-tristate
+ */
+
+ NvError NvRmSetModuleTristate(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID RmModule,
+ NvBool EnableTristate );
+
+/**
+ * Sets an ODM module ID to tristate configuration. Analogous to @see NvRmSetModuleTristate,
+ * but indexed based on the ODM module ID, rather than the controller ID.
+ *
+ * @param hDevice The RM instance
+ * @param OdmModule The module to set (should be of type NvOdmIoModule)
+ * @param OdmInstance The instance of the module to set
+ * @param EnableTristate NV_TRUE will tristate the specified pins, NV_FALSE will un-tristate
+ */
+
+ NvError NvRmSetOdmModuleTristate(
+ NvRmDeviceHandle hDevice,
+ NvU32 OdmModule,
+ NvU32 OdmInstance,
+ NvBool EnableTristate );
+
+/**
+ * Configures modules which can provide clock sources to peripherals.
+ * If a Tegra application processor is expected to provide a clock source
+ * to an external peripheral, this API should be called to configure the
+ * clock source and to ensure that its pins are driven prior to attempting
+ * to program the peripheral through a command interface (e.g., SPI).
+ *
+ * @param hDevice The RM instance
+ * @param IoModule The module to set, must be NvOdmIoModule_ExternalClock
+ * @param Instance The instance of the I/O module to be set.
+ * @param Config The pin map configuration for the I/O module.
+ * @param EnableTristate NV_TRUE will tristate the specified clock source,
+ * NV_FALSE will drive it.
+ *
+ * @retval Returns the clock frequency, in KHz, that is output on the
+ * designated pin (or '0' if no clock frequency is specified or found).
+ */
+
+ NvU32 NvRmExternalClockConfig(
+ NvRmDeviceHandle hDevice,
+ NvU32 IoModule,
+ NvU32 Instance,
+ NvU32 Config,
+ NvBool EnableTristate );
+
+typedef struct NvRmModuleSdmmcInterfaceCapsRec
+{
+
+ /// Maximum bus width supported by the physical interface
+ /// Will be 2, 4 or 8 depending on the selected pin mux
+ NvU32 MmcInterfaceWidth;
+} NvRmModuleSdmmcInterfaceCaps;
+
+typedef struct NvRmModulePcieInterfaceCapsRec
+{
+
+ /// Maximum bus type supported by the physical interface
+ /// Will be 4X1 or 2X2 depending on the selected pin mux
+ NvU32 PcieNumEndPoints;
+ NvU32 PcieLanesPerEp;
+} NvRmModulePcieInterfaceCaps;
+
+typedef struct NvRmModulePwmInterfaceCapsRec
+{
+
+ /// The OR bits value of PWM Output IDs supported by the
+ /// physical interface depending on the selected pin mux.
+ /// Hence, PwmOutputId_PWM0 = bit 0, PwmOutputId_PWM1 = bit 1,
+ /// PwmOutputId_PWM2 = bit 2, PwmOutputId_PWM3 = bit 3
+ NvU32 PwmOutputIdSupported;
+} NvRmModulePwmInterfaceCaps;
+
+typedef struct NvRmModuleNandInterfaceCapsRec
+{
+
+ /// Maximum bus width supported by the physical interface
+ /// Will be 8 or 16 depending on the selected pin mux
+ NvU8 NandInterfaceWidth;
+ NvBool IsCombRbsyMode;
+} NvRmModuleNandInterfaceCaps;
+
+typedef struct NvRmModuleUartInterfaceCapsRec
+{
+
+ /// Maximum number of the interface lines supported by the physical interface.
+ /// Will be 0, 2, 4 or 8 depending on the selected pin mux.
+ /// 0 means there is no physical interface for the uart.
+ /// 2 means only rx/tx lines are supported.
+ /// 4 means only rx/tx/rtx/cts lines are supported.
+ /// 8 means full modem lines are supported.
+ NvU32 NumberOfInterfaceLines;
+} NvRmModuleUartInterfaceCaps;
+
+/**
+ * @brief Query the board-defined capabilities of an I/O controller
+ *
+ * This API will return capabilities for controller modules based on
+ * interface properties defined by ODM query interfaces, such as the
+ * pin mux query.
+ *
+ * pCap should be a pointer to the matching NvRmxxxInterfaceCaps structure
+ * (defined above) for the ModuleId, and CapStructSize should be
+ * the sizeof(structure type), and should be word-aligned.
+ *
+ * @retval NvError_NotSupported if the specified ModuleID does not
+ * exist on the current platform.
+ */
+
+ NvError NvRmGetModuleInterfaceCapabilities(
+ NvRmDeviceHandle hRm,
+ NvRmModuleID ModuleId,
+ NvU32 CapStructSize,
+ void* pCaps );
+
+/**
+ * Defines SoC strap groups.
+ */
+
+typedef enum
+{
+
+ /// ram_code strap group
+ NvRmStrapGroup_RamCode = 1,
+ NvRmStrapGroup_Num,
+ NvRmStrapGroup_Force32 = 0x7FFFFFFF
+} NvRmStrapGroup;
+
+/**
+ * Gets SoC strap value for the given strap group.
+ *
+ * @param hDevice The RM instance
+ * @param StrapGroup Strap group to be read.
+ * @param pStrapValue A pointer to the returned strap group value.
+ *
+ * @retval NvSuccess if strap value is read successfully
+ * @retval NvError_NotSupported if the specified strap group does not
+ * exist on the current SoC.
+ */
+
+ NvError NvRmGetStraps(
+ NvRmDeviceHandle hDevice,
+ NvRmStrapGroup StrapGroup,
+ NvU32 * pStrapValue );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_pmu.h b/arch/arm/mach-tegra/include/nvrm_pmu.h
new file mode 100644
index 000000000000..7ab6fa92d309
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_pmu.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_pmu_H
+#define INCLUDED_nvrm_pmu_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_init.h"
+
+/**
+ * @defgroup nvrm_pmu
+ *
+ * This is the power management unit (PMU) API for Rm, which
+ * handles the abstraction of external power management devices.
+ * For NVIDIA&reg; Driver Development Kit (DDK) clients, PMU is a
+ * set of voltages used to provide power to the SoC or to monitor low battery
+ * conditions. The API allows DDK clients to determine whether the
+ * particular voltage is supported by the ODM platform, retrieve the
+ * capabilities of PMU, and get/set voltage levels at runtime.
+ *
+ * All voltage rails are referenced using ODM-assigned unsigned integers. ODMs
+ * may select any convention for assigning these values; however, the values
+ * accepted as input parameters by the PMU ODM adaptation interface must
+ * match the values stored in the address field of \c NvRmIoModule_Vdd buses
+ * defined in the Peripheral Discovery ODM adaptation.
+ *
+ *
+ * @ingroup nvrm_pmu
+ * @{
+ */
+
+/**
+ * Combines information for the particular PMU Vdd rail.
+ */
+
+typedef struct NvRmPmuVddRailCapabilitiesRec
+{
+
+ /// Specifies ODM protection attribute; if \c NV_TRUE PMU hardware
+ /// or ODM Kit would protect this voltage from being changed by NvDdk client.
+ NvBool RmProtected;
+
+ /// Specifies the minimum voltage level in mV.
+ NvU32 MinMilliVolts;
+
+ /// Specifies the step voltage level in mV.
+ NvU32 StepMilliVolts;
+
+ /// Specifies the maximum voltage level in mV.
+ NvU32 MaxMilliVolts;
+
+ /// Specifies the request voltage level in mV.
+ NvU32 requestMilliVolts;
+} NvRmPmuVddRailCapabilities;
+
+/// Special level to indicate voltage plane is disabled.
+#define ODM_VOLTAGE_OFF (0UL)
+
+/**
+ * Gets capabilities for the specified PMU voltage.
+ *
+ * @param vddId The ODM-defined PMU rail ID.
+ * @param pCapabilities A pointer to the targeted
+ * capabilities returned by the ODM.
+ */
+
+ void NvRmPmuGetCapabilities(
+ NvRmDeviceHandle hDevice,
+ NvU32 vddId,
+ NvRmPmuVddRailCapabilities * pCapabilities );
+
+/**
+ * Gets current voltage level for the specified PMU voltage.
+ *
+ * @param hDevice The Rm device handle.
+ * @param vddId The ODM-defined PMU rail ID.
+ * @param pMilliVolts A pointer to the voltage level returned
+ * by the ODM.
+ */
+
+ void NvRmPmuGetVoltage(
+ NvRmDeviceHandle hDevice,
+ NvU32 vddId,
+ NvU32 * pMilliVolts );
+
+/**
+ * Sets new voltage level for the specified PMU voltage.
+ *
+ * @param hDevice The Rm device handle.
+ * @param vddId The ODM-defined PMU rail ID.
+ * @param MilliVolts The new voltage level to be set in millivolts (mV).
+ * Set to \c ODM_VOLTAGE_OFF to turn off the target voltage.
+ * @param pSettleMicroSeconds A pointer to the settling time in microseconds (uS),
+ * which is the time for supply voltage to settle after this function
+ * returns; this may or may not include PMU control interface transaction time,
+ * depending on the ODM implementation. If null this parameter is ignored.
+ */
+
+ void NvRmPmuSetVoltage(
+ NvRmDeviceHandle hDevice,
+ NvU32 vddId,
+ NvU32 MilliVolts,
+ NvU32 * pSettleMicroSeconds );
+
+/**
+ * Configures SoC power rail controls for the upcoming PMU voltage transition.
+ *
+ * @note Should be called just before PMU rail On/Off, or Off/On transition.
+ * Should not be called if rail voltage level is changing within On range.
+ *
+ * @param hDevice The Rm device handle.
+ * @param vddId The ODM-defined PMU rail ID.
+ * @param Enable Set NV_TRUE if target voltage is about to be turned On, or
+ * NV_FALSE if target voltage is about to be turned Off.
+ */
+
+ void NvRmPmuSetSocRailPowerState(
+ NvRmDeviceHandle hDevice,
+ NvU32 vddId,
+ NvBool Enable );
+
+/**
+ * Defines Charging path.
+ */
+
+typedef enum
+{
+
+ /// Specifies external wall plug charger.
+ NvRmPmuChargingPath_MainPlug,
+
+ /// Specifies external USB bus charger.
+ NvRmPmuChargingPath_UsbBus,
+ NvRmPmuChargingPath_Num,
+ NvRmPmuChargingPath_Force32 = 0x7FFFFFFF
+} NvRmPmuChargingPath;
+
+/// Special level to indicate dumb charger current limit.
+#define NVODM_DUMB_CHARGER_LIMIT (0xFFFFFFFFUL)
+
+/**
+ * Defines AC status.
+ */
+
+typedef enum
+{
+
+ /// Specifies AC is offline.
+ NvRmPmuAcLine_Offline,
+
+ /// Specifies AC is online.
+ NvRmPmuAcLine_Online,
+
+ /// Specifies backup power.
+ NvRmPmuAcLine_BackupPower,
+ NvRmPmuAcLineStatus_Num,
+ NvRmPmuAcLineStatus_Force32 = 0x7FFFFFFF
+} NvRmPmuAcLineStatus;
+
+/** @name Battery Status Defines */
+/*@{*/
+
+#define NVODM_BATTERY_STATUS_HIGH 0x01
+#define NVODM_BATTERY_STATUS_LOW 0x02
+#define NVODM_BATTERY_STATUS_CRITICAL 0x04
+#define NVODM_BATTERY_STATUS_CHARGING 0x08
+#define NVODM_BATTERY_STATUS_NO_BATTERY 0x80
+#define NVODM_BATTERY_STATUS_UNKNOWN 0xFF
+
+/*@}*/
+/** @name Battery Data Defines */
+/*@{*/
+#define NVODM_BATTERY_DATA_UNKNOWN 0x7FFFFFFF
+
+/*@}*/
+
+/**
+ * Defines battery instances.
+ */
+
+typedef enum
+{
+
+ /// Specifies main battery.
+ NvRmPmuBatteryInst_Main,
+ NvRmPmuBatteryInst_Backup,
+ NvRmPmuBatteryInstance_Num,
+ NvRmPmuBatteryInstance_Force32 = 0x7FFFFFFF
+} NvRmPmuBatteryInstance;
+
+/**
+ * Defines battery data.
+ */
+
+typedef struct NvRmPmuBatteryDataRec
+{
+
+ /// Specifies battery life percent.
+ NvU32 batteryLifePercent;
+
+ /// Specifies battery life time.
+ NvU32 batteryLifeTime;
+
+ /// Specifies voltage.
+ NvU32 batteryVoltage;
+
+ /// Specifies battery current.
+ NvS32 batteryCurrent;
+
+ /// Specifies battery average current.
+ NvS32 batteryAverageCurrent;
+
+ /// Specifies battery interval.
+ NvU32 batteryAverageInterval;
+
+ /// Specifies the mAH consumed.
+ NvU32 batteryMahConsumed;
+
+ /// Specifies battery temperature.
+ NvU32 batteryTemperature;
+} NvRmPmuBatteryData;
+
+/**
+ * Defines battery chemistry.
+ */
+
+typedef enum
+{
+
+ /// Specifies an alkaline battery.
+ NvRmPmuBatteryChemistry_Alkaline,
+
+ /// Specifies a nickel-cadmium (NiCd) battery.
+ NvRmPmuBatteryChemistry_NICD,
+
+ /// Specifies a nickel-metal hydride (NiMH) battery.
+ NvRmPmuBatteryChemistry_NIMH,
+
+ /// Specifies a lithium-ion (Li-ion) battery.
+ NvRmPmuBatteryChemistry_LION,
+
+ /// Specifies a lithium-ion polymer (Li-poly) battery.
+ NvRmPmuBatteryChemistry_LIPOLY,
+
+ /// Specifies a zinc-air battery.
+ NvRmPmuBatteryChemistry_XINCAIR,
+ NvRmPmuBatteryChemistry_Num,
+ NvRmPmuBatteryChemistry_Force32 = 0x7FFFFFFF
+} NvRmPmuBatteryChemistry;
+
+/**
+* Sets the charging current limit.
+*
+* @param hRmDevice The Rm device handle.
+* @param ChargingPath The charging path.
+* @param ChargingCurrentLimitMa The charging current limit in mA.
+* @param ChargerType Type of the charger detected
+* @see NvOdmUsbChargerType
+*/
+
+ void NvRmPmuSetChargingCurrentLimit(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuChargingPath ChargingPath,
+ NvU32 ChargingCurrentLimitMa,
+ NvU32 ChargerType );
+
+/**
+ * Gets the AC line status.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param pStatus A pointer to the AC line
+ * status returned by the ODM.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuGetAcLineStatus(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuAcLineStatus * pStatus );
+
+/**
+ * Gets the battery status.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param batteryInst The battery type.
+ * @param pStatus A pointer to the battery
+ * status returned by the ODM.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuGetBatteryStatus(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuBatteryInstance batteryInst,
+ NvU8 * pStatus );
+
+/**
+ * Gets the battery data.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param batteryInst The battery type.
+ * @param pData A pointer to the battery
+ * data returned by the ODM.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuGetBatteryData(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuBatteryInstance batteryInst,
+ NvRmPmuBatteryData * pData );
+
+/**
+ * Gets the battery full life time.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param batteryInst The battery type.
+ * @param pLifeTime A pointer to the battery
+ * full life time returned by the ODM.
+ *
+ */
+
+ void NvRmPmuGetBatteryFullLifeTime(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuBatteryInstance batteryInst,
+ NvU32 * pLifeTime );
+
+/**
+ * Gets the battery chemistry.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param batteryInst The battery type.
+ * @param pChemistry A pointer to the battery
+ * chemistry returned by the ODM.
+ *
+ */
+
+ void NvRmPmuGetBatteryChemistry(
+ NvRmDeviceHandle hRmDevice,
+ NvRmPmuBatteryInstance batteryInst,
+ NvRmPmuBatteryChemistry * pChemistry );
+
+/**
+ * Reads current RTC count in seconds.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param pCount A pointer to the RTC count returned by this function.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuReadRtc(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 * pCount );
+
+/**
+ * Updates current RTC seconds count.
+ *
+ * @param hRmDevice The Rm device handle.
+ * @param Count Seconds count to update the RTC counter.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuWriteRtc(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 Count );
+
+/**
+ * Verifies whether the RTC is initialized.
+ *
+ * @param hRmDevice The Rm device handle.
+ *
+ * @return NV_TRUE if initialized, or NV_FALSE otherwise.
+ */
+
+ NvBool NvRmPmuIsRtcInitialized(
+ NvRmDeviceHandle hRmDevice );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_power.h b/arch/arm/mach-tegra/include/nvrm_power.h
new file mode 100644
index 000000000000..e8be8c9bf4cf
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_power.h
@@ -0,0 +1,1326 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_power_H
+#define INCLUDED_nvrm_power_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvos.h"
+
+/**
+ * Frequency data type, expressed in KHz.
+ */
+
+typedef NvU32 NvRmFreqKHz;
+
+/**
+ * Special value for an unspecified or default frequency.
+ */
+static const NvRmFreqKHz NvRmFreqUnspecified = 0xFFFFFFFF;
+
+/**
+ * Special value for the maximum possible frequency.
+ */
+static const NvRmFreqKHz NvRmFreqMaximum = 0xFFFFFFFD;
+
+/**
+ * Voltage data type, expressed in millivolts.
+ */
+
+typedef NvU32 NvRmMilliVolts;
+
+/**
+ * Special value for an unspecified or default voltage.
+ */
+static const NvRmMilliVolts NvRmVoltsUnspecified = 0xFFFFFFFF;
+
+/**
+ * Special value for the maximum possible voltage.
+ */
+static const NvRmMilliVolts NvRmVoltsMaximum = 0xFFFFFFFD;
+
+/**
+ * Special value indicating voltage / power was cycled (turned off, then back on).
+ */
+static const NvRmMilliVolts NvRmVoltsCycled = 0xFFFFFFFC;
+
+/**
+ * Special value for voltage / power disable.
+ */
+static const NvRmMilliVolts NvRmVoltsOff = 0;
+
+/**
+ * Defines possible power management events
+ */
+
+typedef enum
+{
+
+ /// Specifies no outstanding events
+ NvRmPowerEvent_NoEvent = 1,
+
+ /// Specifies wake from LP0
+ NvRmPowerEvent_WakeLP0,
+
+ /// Specifies wake from LP1
+ NvRmPowerEvent_WakeLP1,
+ NvRmPowerEvent_Num,
+ NvRmPowerEvent_Force32 = 0x7FFFFFFF
+} NvRmPowerEvent;
+
+/**
+ * Defines combined RM clients power state
+ */
+
+typedef enum
+{
+
+ /// Specifies boot state ("RM is not open, yet")
+ NvRmPowerState_Boot = 1,
+
+ /// Specifies active state ("not ready-to-suspend")
+ /// This state is entered if any client enables power to any module, other
+ /// than NvRmPrivModuleID_System, via NvRmPowerVoltageControl() API
+ NvRmPowerState_Active,
+
+ /// Specifies h/w autonomous state ("ready-to-core-power-on-suspend")
+ /// This state is entered if all RM clients enable power only for
+ /// NvRmPrivModuleID_System, via NvRmPowerVoltageControl() API
+ NvRmPowerState_AutoHw,
+
+ /// Specifies idle state ("ready-to-core-power-off-suspend")
+ /// This state is entered if none of the RM clients enables power
+ /// to any module.
+ NvRmPowerState_Idle,
+
+ /// Specifies LP0 state ("main power-off suspend")
+ NvRmPowerState_LP0,
+
+ /// Specifies LP1 state ("main power-on suspend")
+ NvRmPowerState_LP1,
+
+ /// Specifies Skipped LP0 state (set when LP0 entry error is
+ /// detected, SoC resumes operations without entering LP0 state)
+ NvRmPowerState_SkippedLP0,
+ NvRmPowerState_Num,
+ NvRmPowerState_Force32 = 0x7FFFFFFF
+} NvRmPowerState;
+
+/** Defines the clock configuration flags which are applicable for some modules.
+ * Multiple flags can be OR'ed and passed to the NvRmPowerModuleClockConfig API.
+*/
+
+typedef enum
+{
+
+ /// Use external clock for the pads of the module.
+ NvRmClockConfig_ExternalClockForPads = 0x1,
+
+ /// Use internal clock for the pads of the module
+ NvRmClockConfig_InternalClockForPads = 0x2,
+
+ /// Use external clock for the core of the module, or
+ /// module is in slave mode
+ NvRmClockConfig_ExternalClockForCore = 0x4,
+
+ /// Use Internal clock for the core of the module, or
+ /// module is in master mode.
+ NvRmClockConfig_InternalClockForCore = 0x8,
+
+ /// Use inverted clock for the module. i.e the polarity of the clock used is
+ /// inverted with respect to the source clock.
+ NvRmClockConfig_InvertedClock = 0x10,
+
+ /// Configure target module sub-clock
+ /// - Target Display: configure Display and TVDAC
+ /// - Target TVO: configure CVE and TVDAC only
+ /// - Target VI: configure VI_SENSOR only
+ /// - Target SPDIF: configure SPDIFIN only
+ NvRmClockConfig_SubConfig = 0x20,
+
+ /// Use MIPI PLL as Display clock source
+ NvRmClockConfig_MipiSync = 0x40,
+
+ /// Adjust Audio PLL to match requested I2S or SPDIF frequency
+ NvRmClockConfig_AudioAdjust = 0x80,
+
+ /// Disable TVDAC along with Display configuration
+ NvRmClockConfig_DisableTvDAC = 0x100,
+
+ /// Do not fail clock configuration request with specific target frequency
+ /// above Hw limit - just configure clock at Hw limit. (Note that caller
+ /// can request NvRmFreqMaximum to configure clock at Hw limit, regardless
+ /// of this flag presence).
+ NvRmClockConfig_QuietOverClock = 0x200,
+ NvRmClockConfigFlags_Num,
+ NvRmClockConfigFlags_Force32 = 0x7FFFFFFF
+} NvRmClockConfigFlags;
+
+/**
+ * Defines SOC-wide clocks controlled by Dynamic Frequency Scaling (DFS)
+ * that can be targeted by Starvation and Busy hints
+ */
+
+typedef enum
+{
+
+ /// Specifies CPU clock
+ NvRmDfsClockId_Cpu = 1,
+
+ /// Specifies AVP clock
+ NvRmDfsClockId_Avp,
+
+ /// Specifies System bus clock
+ NvRmDfsClockId_System,
+
+ /// Specifies AHB bus clock
+ NvRmDfsClockId_Ahb,
+
+ /// Specifies APB bus clock
+ NvRmDfsClockId_Apb,
+
+ /// Specifies video pipe clock
+ NvRmDfsClockId_Vpipe,
+
+ /// Specifies external memory controller clock
+ NvRmDfsClockId_Emc,
+ NvRmDfsClockId_Num,
+ NvRmDfsClockId_Force32 = 0x7FFFFFFF
+} NvRmDfsClockId;
+
+/**
+ * Defines DFS manager run states
+ */
+
+typedef enum
+{
+
+ /// DFS is in invalid, not initialized state
+ NvRmDfsRunState_Invalid = 0,
+
+ /// DFS is disabled / not supported (terminal state)
+ NvRmDfsRunState_Disabled = 1,
+
+ /// DFS is stopped - no automatic clock control. Starvation and Busy hints
+ /// are recorded but have no affect.
+ NvRmDfsRunState_Stopped,
+
+ /// DFS is running in closed loop - full automatic control of SoC-wide
+ /// clocks based on clock activity measurements. Starvation and Busy hints
+ /// are functional as well.
+ NvRmDfsRunState_ClosedLoop,
+
+ /// DFS is running in closed loop with profiling (can not be set on non
+ /// profiling build).
+ NvRmDfsRunState_ProfiledLoop,
+ NvRmDfsRunState_Num,
+ NvRmDfsRunState_Force32 = 0x7FFFFFFF
+} NvRmDfsRunState;
+
+/**
+ * Defines DFS profile targets
+ */
+
+typedef enum
+{
+
+ /// DFS algorithm within ISR
+ NvRmDfsProfileId_Algorithm = 1,
+
+ /// DFS Interrupt service - includes algorithm plus OS locking and
+ /// signaling calls; hence, includes blocking time (if any) as well
+ NvRmDfsProfileId_Isr,
+
+ /// DFS clock control time - includes PLL stabilization time, OS locking
+ /// and signalling calls; hence, includes blocking time (if any) as well
+ NvRmDfsProfileId_Control,
+ NvRmDfsProfileId_Num,
+ NvRmDfsProfileId_Force32 = 0x7FFFFFFF
+} NvRmDfsProfileId;
+
+/**
+ * Defines voltage rails that are controlled in conjunction with dynamic
+ * frequency scaling.
+ */
+
+typedef enum
+{
+
+    /// SoC core rail
+    NvRmDfsVoltageRailId_Core = 1,
+
+    /// Dedicated CPU rail
+    NvRmDfsVoltageRailId_Cpu,
+    /// Number of DFS voltage rails (list terminator - not a valid rail ID)
+    NvRmDfsVoltageRailId_Num,
+    /// Ignored - included only to force a 32-bit storage size for this enum
+    NvRmDfsVoltageRailId_Force32 = 0x7FFFFFFF
+} NvRmDfsVoltageRailId;
+
+/**
+ * Defines busy hint API synchronization modes.
+ */
+
+typedef enum
+{
+
+    /// Asynchronous mode (non-blocking API)
+    NvRmDfsBusyHintSyncMode_Async = 1,
+
+    /// Synchronous mode (blocking API)
+    NvRmDfsBusyHintSyncMode_Sync,
+    /// Number of synchronization modes (list terminator - not a valid mode)
+    NvRmDfsBusyHintSyncMode_Num,
+    /// Ignored - included only to force a 32-bit storage size for this enum
+    NvRmDfsBusyHintSyncMode_Force32 = 0x7FFFFFFF
+} NvRmDfsBusyHintSyncMode;
+
+/**
+ * Holds information on DFS clock domain utilization
+ */
+
+typedef struct NvRmDfsClockUsageRec
+{
+
+    /// Minimum clock domain frequency
+    NvRmFreqKHz MinKHz;
+
+    /// Maximum clock domain frequency
+    NvRmFreqKHz MaxKHz;
+
+    /// Low corner frequency - current low boundary for DFS control algorithm.
+    /// Can be dynamically adjusted via APIs: NvRmDfsSetLowCorner() for all DFS
+    /// domains, NvRmDfsSetCpuEnvelope() for CPU, and NvRmDfsSetEmcEnvelope()
+    /// for EMC. When all DFS domains hit low corner, DFS stops waking up CPU
+    /// from low power state.
+    NvRmFreqKHz LowCornerKHz;
+
+    /// High corner frequency - current high boundary for DFS control algorithm.
+    /// Can be dynamically adjusted via APIs: NvRmDfsSetCpuEnvelope() for Cpu,
+    /// NvRmDfsSetEmcEnvelope() for Emc, and NvRmDfsSetAvHighCorner() for other
+    /// DFS domains.
+    NvRmFreqKHz HighCornerKHz;
+
+    /// Current clock domain frequency
+    NvRmFreqKHz CurrentKHz;
+
+    /// Average frequency of domain *activity* (not average frequency). For
+    /// domains that do not have activity monitors reported as unspecified.
+    NvRmFreqKHz AverageKHz;
+} NvRmDfsClockUsage;
+
+/**
+ * Holds information on DFS busy hint.
+ * Passed as an array element to NvRmPowerBusyHintMulti().
+ */
+
+typedef struct NvRmDfsBusyHintRec
+{
+
+    /// Target clock domain ID
+    NvRmDfsClockId ClockId;
+
+    /// Requested boost duration in milliseconds
+    NvU32 BoostDurationMs;
+
+    /// Requested clock frequency level in kHz
+    NvRmFreqKHz BoostKHz;
+
+    /// Busy pulse mode indicator - if true, busy boost is completely removed
+    /// after busy time has expired; if false, DFS will gradually lower domain
+    /// frequency after busy boost.
+    NvBool BusyAttribute;
+} NvRmDfsBusyHint;
+
+/**
+ * Holds information on DFS starvation hint.
+ * Passed as an array element to NvRmPowerStarvationHintMulti().
+ */
+
+typedef struct NvRmDfsStarvationHintRec
+{
+
+    /// Target clock domain ID
+    NvRmDfsClockId ClockId;
+
+    /// The starvation indicator for the target domain
+    NvBool Starving;
+} NvRmDfsStarvationHint;
+
+/**
+ * The NVRM_POWER_CLIENT_TAG macro is used to convert ASCII 4-character codes
+ * into the 32-bit tag that can be used to identify power manager clients for
+ * logging purposes.
+ *
+ * The first character is packed into the most significant byte, e.g.
+ * NVRM_POWER_CLIENT_TAG('A','B','C','D') yields 0x41424344.
+ */
+#define NVRM_POWER_CLIENT_TAG(a,b,c,d) \
+    ((NvU32) ((((a)&0xffUL)<<24UL) |   \
+              (((b)&0xffUL)<<16UL) |   \
+              (((c)&0xffUL)<< 8UL) |   \
+              (((d)&0xffUL))))
+
+/**
+ * Registers RM power client.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param hEventSemaphore The client semaphore for power management event
+ * signaling. If null, no events will be signaled to the particular client.
+ * @param pClientId A pointer to the storage that on entry contains client
+ * tag (optional), and on exit returns client ID, assigned by power manager.
+ *
+ * @retval NvSuccess if registration was successful.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for client
+ * registration.
+ */
+
+ NvError NvRmPowerRegister(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvOsSemaphoreHandle hEventSemaphore,
+ NvU32 * pClientId );
+
+/**
+ * Unregisters RM power client. Power and clock for the modules enabled by this
+ * client are disabled and any starvation or busy requests are cancelled during
+ * the unregistration.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClientId The client ID obtained during registration.
+ */
+
+ void NvRmPowerUnRegister(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 ClientId );
+
+/**
+ * Gets last detected and not yet retrieved power management event.
+ * Returns no outstanding event if no events have been detected since the
+ * client registration or the last call to this function.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClientId The client ID obtained during registration.
+ * @param pEvent Output storage pointer for power event identifier.
+ *
+ * @retval NvSuccess if event identifier was retrieved successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ */
+
+ NvError NvRmPowerGetEvent(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 ClientId,
+ NvRmPowerEvent * pEvent );
+
+/**
+ * Notifies RM about power management event. Provides an interface for
+ * OS power manager to report system power events to RM.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param Event The event RM power manager is to be aware of.
+ */
+
+ void NvRmPowerEventNotify(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPowerEvent Event );
+
+/**
+ * Gets combined RM clients power state.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pState Output storage pointer for combined RM clients power state.
+ *
+ * @retval NvSuccess if power state was retrieved successfully.
+ */
+
+ NvError NvRmPowerGetState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPowerState * pState );
+
+/**
+ * Gets SoC primary oscillator/input frequency.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @retval Primary frequency in KHz.
+ */
+
+ NvRmFreqKHz NvRmPowerGetPrimaryFrequency(
+ NvRmDeviceHandle hRmDeviceHandle );
+
+/**
+ * Gets maximum frequency limit for the module clock.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ModuleId The combined module ID and instance of the target module.
+ *
+ * @retval Module clock maximum frequency in KHz.
+ */
+
+ NvRmFreqKHz NvRmPowerModuleGetMaxFrequency(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID ModuleId );
+
+/**
+ * This API is used to set the clock configuration of the module clock.
+ * This API can also be used to query the existing configuration.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ * NvRmFreqKHz MyFreqKHz = 0;
+ * ModuleId = NVRM_MODULE_ID(NvRmModuleID_Uart, 0);
+ *
+ * // Get current frequency settings
+ * Error = NvRmPowerModuleClockConfig(RmHandle, ModuleId, ClientId,
+ * 0, 0, NULL, 0, &MyFreqKHz, 0);
+ *
+ * // Set target frequency within HW defined limits
+ * MyFreqKHz = TARGET_FREQ;
+ * Error = NvRmPowerModuleClockConfig(RmHandle, ModuleId, ClientId,
+ * NvRmFreqUnspecified, NvRmFreqUnspecified,
+ *                    &MyFreqKHz, 1, &MyFreqKHz, 0);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ModuleId The combined module ID and instance of the target module.
+ * @param ClientId The client ID obtained during registration.
+ * @param MinFreq Requested minimum frequency for hardware module operation.
+ * If the value is NvRmFreqUnspecified, RM uses the min freq that this
+ * module can operate.
+ * If the value specified is more than the Hw minimum, passed value is used.
+ * If the value specified is less than the Hw minimum, it will be clipped to
+ * the HW minimum value.
+ * @param MaxFreq Requested maximum frequency for hardware module operation.
+ * If the value is NvRmFreqUnspecified, RM uses the max freq that this
+ * module can run.
+ * If the value specified is less than the Hw maximum, that value is used.
+ * If the value specified is more than the Hw limit, it will be clipped to
+ * the HW maximum.
+ * @param PrefFreqList Pointer to a list of preferred frequencies, sorted in the
+ * decreasing order of priority. Use NvRmFreqMaximum to request Hw maximum.
+ * @param PrefFreqListCount Number of entries in the PrefFreqList array.
+ * @param CurrentFreq Returns the current clock frequency of that module. NULL
+ * is a valid value for this parameter.
+ * @param flags Module specific flags. These flags are valid only for some
+ * modules. See @NvRmClockConfigFlags
+ *
+ * @retval NvSuccess if clock control request completed successfully.
+ * @retval NvError_ModuleNotPresent if the module ID or instance is invalid.
+ * @retval NvError_NotSupported if failed to configure requested frequency (e.g.,
+ * output frequency for possible divider settings is outside specified range).
+ */
+
+ NvError NvRmPowerModuleClockConfig(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID ModuleId,
+ NvU32 ClientId,
+ NvRmFreqKHz MinFreq,
+ NvRmFreqKHz MaxFreq,
+ const NvRmFreqKHz * PrefFreqList,
+ NvU32 PrefFreqListCount,
+ NvRmFreqKHz * CurrentFreq,
+ NvU32 flags );
+
+/**
+ * This API is used to enable and disable the module clock.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ModuleId The combined module ID and instance of the target module.
+ * @param ClientId The client ID obtained during registration.
+ * @param Enable Enables/disables the module clock.
+ *
+ * @retval NvSuccess if the module is enabled.
+ * @retval NvError_ModuleNotPresent if the module ID or instance is invalid.
+ */
+
+ NvError NvRmPowerModuleClockControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID ModuleId,
+ NvU32 ClientId,
+ NvBool Enable );
+
+/**
+ * Request the voltage range for a hardware module. As power planes are shared
+ * between different modules, in the majority of cases the RM will choose the
+ * appropriate voltage, and module owners only need to enable or disable power
+ * for a module. Enable request is always completed (i.e., voltage is applied
+ * to the module) before this function returns. Disable request just means that
+ * the client is ready for module power down. Actually the power may be removed
+ * within the call or any time later, depending on other client needs and power
+ * plane dependencies with other modules.
+ *
+ * Assert encountered in debug mode if the module ID or instance is invalid.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ * ModuleId = NVRM_MODULE_ID(NvRmModuleID_Uart, 0);
+ *
+ * // Enable module power
+ * Error = NvRmPowerVoltageControl(RmHandle, ModuleId, ClientId,
+ * NvRmVoltsUnspecified, NvRmVoltsUnspecified,
+ * NULL, 0, NULL);
+ *
+ * // Disable module power
+ * Error = NvRmPowerVoltageControl(RmHandle, ModuleId, ClientId,
+ * NvRmVoltsOff, NvRmVoltsOff,
+ * NULL, 0, NULL);
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param ModuleId The combined module ID and instance of the target module
+ * @param ClientId The client ID obtained during registration
+ * @param MinVolts Requested minimum voltage for hardware module operation
+ * @param MaxVolts Requested maximum voltage for hardware module operation
+ * Set to NvRmVoltsUnspecified when enabling power for a module, or to
+ * NvRmVoltsOff when disabling.
+ * @param PrefVoltageList Pointer to a list of preferred voltages, ordered from
+ * lowest to highest, and terminated with a voltage of NvRmVoltsUnspecified.
+ * This parameter is optional - ignored if null.
+ * @param PrefVoltageListCount Number of entries in the PrefVoltageList array.
+ * @param CurrentVolts Output storage pointer for resulting module voltage.
+ * NvRmVoltsUnspecified is returned if module power is On and was not cycled,
+ * since the last voltage request with the same ClientId and ModuleId;
+ * NvRmVoltsCycled is returned if module power is On but was powered down,
+ * since the last voltage request with the same ClientId and ModuleId;
+ * NvRmVoltsOff is returned if module power is Off.
+ * This parameter is optional - ignored if null.
+ *
+ * @retval NvSuccess if voltage control request completed successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * voltage request.
+ */
+
+ NvError NvRmPowerVoltageControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID ModuleId,
+ NvU32 ClientId,
+ NvRmMilliVolts MinVolts,
+ NvRmMilliVolts MaxVolts,
+ const NvRmMilliVolts * PrefVoltageList,
+ NvU32 PrefVoltageListCount,
+ NvRmMilliVolts * CurrentVolts );
+
+/**
+ * Lists modules registered by power clients for voltage control.
+ *
+ * @param pListSize Pointer to the list size. On entry specifies list size
+ * allocated by the caller, on exit - actual number of Ids returned. If
+ * entry size is 0, maximum list size is returned.
+ * @param pIdList Pointer to the list of combined module Id/Instance values
+ * to be filled in by this function. Ignored if input list size is 0.
+ * @param pActiveList Pointer to the list of modules Active attributes
+ * to be filled in by this function. Ignored if input list size is 0.
+ */
+
+ void NvRmListPowerAwareModules(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 * pListSize,
+ NvRmModuleID * pIdList,
+ NvBool * pActiveList );
+
+/**
+ * Requests immediate frequency boost for SOC-wide clocks. In general, the RM
+ * DFS manages SOC-wide clocks by measuring the average use of clock cycles,
+ * and adjusting clock rates to minimize wasted clocks. It is preferable and
+ * expected that modules consume clock cycles at a more-or-less constant rate.
+ * Under some circumstances this will not be the case. For example, many cycles
+ * may be consumed to prime a new media processing activity. If power client
+ * anticipates such circumstances, it may sparingly use this API to alert the RM
+ * that a temporary spike in clock usage is about to occur.
+ *
+ * Usage example:
+ *
+ * // Busy hint for CPU clock
+ * NvError Error;
+ * Error = NvRmPowerBusyHint(RmHandle, NvRmDfsClockId_Cpu, ClientId,
+ * BoostDurationMs, BoostFreqKHz);
+ *
+ * Clients should not call this API in an attempt to micro-manage a particular
+ * clock frequency as that is the responsibility of the RM.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClockId The DFS ID of the clock targeted by this hint.
+ * @param ClientId The client ID obtained during registration.
+ * @param BoostDurationMs The estimate of the boost duration in milliseconds.
+ * Use NV_WAIT_INFINITE to specify busy until canceled. Use 0 to request
+ * instantaneous spike in frequency and let DFS to scale down.
+ * @param BoostKHz The requirements for the boosted clock frequency in kHz.
+ * Use NvRmFreqMaximum to request maximum domain frequency. Use 0 to cancel
+ * all busy hints reported by the specified client for the specified domain.
+ *
+ * @retval NvSuccess if busy request completed successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * busy hint.
+ */
+
+ NvError NvRmPowerBusyHint(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsClockId ClockId,
+ NvU32 ClientId,
+ NvU32 BoostDurationMs,
+ NvRmFreqKHz BoostKHz );
+
+/**
+ * Requests immediate frequency boost for multiple SOC-wide clock domains.
+ * @sa NvRmPowerBusyHint() for detailed explanation of busy hint effects.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClientId The client ID obtained during registration.
+ * @param pMultiHint Pointer to a list of busy hint records for
+ * targeted clocks.
+ * @param NumHints Number of entries in pMultiHint array.
+ * @param Mode Synchronization mode. In asynchronous mode this API returns to
+ * the caller after request is signaled to power manager (non-blocking call).
+ * In synchronous mode the API returns after busy hints are processed by power
+ * manager (blocking call).
+ *
+ * @note It is recommended to use synchronous mode only when low frequency
+ * may result in functional failure. Otherwise, use asynchronous mode or
+ * NvRmPowerBusyHint API, which is always executed as non-blocking request.
+ * Synchronous mode must not be used by PMU transport.
+ *
+ *
+ * @retval NvSuccess if busy hint request completed successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * busy hints.
+ */
+
+ NvError NvRmPowerBusyHintMulti(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 ClientId,
+ const NvRmDfsBusyHint * pMultiHint,
+ NvU32 NumHints,
+ NvRmDfsBusyHintSyncMode Mode );
+
+/**
+ * Request frequency increase for SOC-wide clock to avoid real-time starvation
+ * conditions. Allows modules to contribute to the detection and avoidance of
+ * clock starvation for DFS controlled clocks.
+ *
+ * This API should be called to indicate starvation threat and also to cancel
+ * request when a starvation condition has eased.
+ *
+ * @note Although the RM DFS does its best to manage clocks without starving
+ * the system for clock cycles, bursty clock usage can occasionally cause
+ * short-term clock starvation. One solution is to leave a large enough clock
+ * rate guard band such that any possible burst in clock usage will be absorbed.
+ * This approach tends to waste clock cycles, and worsen power management.
+ *
+ * By allowing power clients to participate in the avoidance of system clock
+ * starvation situations, detection responsibility can be moved closer to the
+ * hardware buffers and processors where starvation occurs, while leaving the
+ * overall dynamic clocking policy to the RM. A typical client would be a module
+ * that manages media processing and is able to determine when it is falling
+ * behind by watching buffer levels or some other module-specific indicator. In
+ * response to the starvation request the RM increases gradually the respective
+ * clock frequency until the request is cancelled by the client.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ *
+ * // Request CPU clock frequency increase to avoid starvation
+ * Error = NvRmPowerStarvationHint(
+ * RmHandle, NvRmDfsClockId_Cpu, ClientId, NV_TRUE);
+ *
+ * // Cancel starvation request for CPU clock frequency
+ * Error = NvRmPowerStarvationHint(
+ * RmHandle, NvRmDfsClockId_Cpu, ClientId, NV_FALSE);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClockId The DFS ID of the clock targeted by this hint.
+ * @param ClientId The client ID obtained during registration.
+ * @param Starving The starvation indicator for the target module. If true,
+ * the client is requesting target frequency increase to avoid starvation
+ * If false, the indication is that the imminent starvation is no longer a
+ * concern for this particular client.
+ *
+ * @retval NvSuccess if starvation request completed successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * starvation hint.
+ */
+
+ NvError NvRmPowerStarvationHint(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsClockId ClockId,
+ NvU32 ClientId,
+ NvBool Starving );
+
+/**
+ * Request frequency increase for multiple SOC-wide clock domains to avoid
+ * real-time starvation conditions.
+ * @sa NvRmPowerStarvationHint() for detailed explanation of starvation hint
+ * effects.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClientId The client ID obtained during registration.
+ * @param pMultiHint Pointer to a list of starvation hint records for
+ * targeted clocks.
+ * @param NumHints Number of entries in pMultiHint array.
+ *
+ * @retval NvSuccess if starvation hint request completed successfully.
+ * @retval NvError_BadValue if specified client ID is not registered.
+ * @retval NvError_InsufficientMemory if failed to allocate memory for
+ * starvation hints.
+ */
+
+ NvError NvRmPowerStarvationHintMulti(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 ClientId,
+ const NvRmDfsStarvationHint * pMultiHint,
+ NvU32 NumHints );
+
+/**
+ * Notifies the RM about DDK module activity.
+ *
+ * @note This function lets DDK modules notify the RM about interesting system
+ * activities. Not all modules will need to make this indication, typically only
+ * modules involved in user input or output activities. However, with current
+ * SOC power management architecture such activities will be detected by the OS
+ * adaptation layer, not RM. This API is not removed, just in case, we will find
+ * out that RM still need to participate in user activity detection. In general,
+ * modules should call this interface sparingly, no more than once every few
+ * seconds.
+ *
+ * In current power management architecture user activity is handled by OS
+ * (not RM) power manager, and activity API is not used at all.
+ *
+ * Assert encountered in debug mode if the module ID or instance is invalid.
+ *
+ * TODO: Remove this API?
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ModuleId The combined module ID and instance of the target module.
+ * @param ClientId The client ID obtained during registration.
+ * @param ActivityDurationMs The duration of the module activity.
+ *
+ * For cases when activity is a series of discontinuous events (keypresses, for
+ * example), this parameter should simply be set to 1.
+ *
+ * For lengthy, continuous activities, this parameter is set to the estimated
+ * length of the activity in milliseconds. This can reduce the number of calls
+ * made to this API.
+ *
+ * A value of 0 in this parameter indicates that the module is not active and
+ * can be used to signal the end of a previously estimated continuous activity.
+ *
+ * @retval NvSuccess if clock control request completed successfully.
+ */
+
+ NvError NvRmPowerActivityHint(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmModuleID ModuleId,
+ NvU32 ClientId,
+ NvU32 ActivityDurationMs );
+
+/**
+ * Gets DFS run state.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return Current DFS run state.
+ */
+
+ NvRmDfsRunState NvRmDfsGetState(
+ NvRmDeviceHandle hRmDeviceHandle );
+
+/**
+ * Gets information on DFS controlled clock utilization. If DFS is stopped
+ * or disabled the average frequency is always equal to current frequency.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param ClockId The DFS ID of the clock targeted by this request.
+ * @param pClockInfo Output storage pointer for clock utilization information.
+ *
+ * @return NvSuccess if clock usage information is returned successfully.
+ */
+
+ NvError NvRmDfsGetClockUtilization(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsClockId ClockId,
+ NvRmDfsClockUsage * pClockUsage );
+
+/**
+ * Sets DFS run state. Allows to stop or re-start DFS as well as switch
+ * between open and closed loop operations.
+ *
+ * On transition to the DFS stopped state, the DFS clocks are just kept at
+ * current frequencies. On transition to DFS run states, DFS sampling data
+ * is re-initialized only if originally DFS was stopped. Transition between
+ * running states has no additional effects, besides operation mode changes.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param NewDfsRunState The DFS run state to be set.
+ *
+ * @retval NvSuccess if DFS state was set successfully.
+ * @retval NvError_NotSupported if DFS was disabled initially, in attempt
+ * to disable initially enabled DFS, or in attempt to run profiled loop
+ * on non profiling build.
+ */
+
+ NvError NvRmDfsSetState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsRunState NewDfsRunState );
+
+/**
+ * Sets DFS low corner frequencies - low boundaries for DFS clocks when DFS
+ * is running. If all DFS domains hit low corner, DFS will no longer wake
+ * CPU from low power state.
+ *
+ * @note When CPU envelope is set via NvRmDfsSetCpuEnvelope() API the CPU
+ * low corner boundary can not be changed by this function.
+ * @note When EMC envelope is set via NvRmDfsSetEmcEnvelope() API the EMC
+ * low corner boundary can not be changed by this function.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ * NvRmFreqKHz LowCorner[NvRmDfsClockId_Num];
+ *
+ * // Fill in low corner array
+ * LowCorner[NvRmDfsClockId_Cpu] = NvRmFreqUnspecified;
+ * LowCorner[NvRmDfsClockId_Avp] = ... ;
+ * LowCorner[NvRmDfsClockId_System] = ...;
+ * LowCorner[NvRmDfsClockId_Ahb] = ...;
+ * LowCorner[NvRmDfsClockId_Apb] = ...;
+ * LowCorner[NvRmDfsClockId_Vpipe] = ...;
+ * LowCorner[NvRmDfsClockId_Emc] = ...;
+ *
+ * // Set new low corner for domains other than CPU, and preserve CPU boundary
+ * Error = NvRmDfsSetLowCorner(RmHandle, NvRmDfsClockId_Num, LowCorner);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsFreqListCount Number of entries in the pDfsLowFreqList array.
+ * Must be always equal to NvRmDfsClockId_Num.
+ * @param pDfsLowFreqList Pointer to a list of low corner frequencies, ordered
+ * according to NvRmDfsClockId enumeration. If the list entry is set to
+ * NvRmFreqUnspecified, the respective low corner boundary is not modified.
+ *
+ * @retval NvSuccess if low corner frequencies were updated successfully.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsSetLowCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsFreqListCount,
+ const NvRmFreqKHz * pDfsLowFreqList );
+
+/**
+ * Sets DFS target frequencies. If DFS is stopped clocks for the DFS domains
+ * will be targeted with the specified frequencies. In any other DFS state
+ * this function has no effect.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ * NvRmFreqKHz Target[NvRmDfsClockId_Num];
+ *
+ * // Fill in target frequencies array
+ * Target[NvRmDfsClockId_Cpu] = ... ;
+ * Target[NvRmDfsClockId_Avp] = ... ;
+ * Target[NvRmDfsClockId_System] = ...;
+ * Target[NvRmDfsClockId_Ahb] = ...;
+ * Target[NvRmDfsClockId_Apb] = ...;
+ * Target[NvRmDfsClockId_Vpipe] = ...;
+ * Target[NvRmDfsClockId_Emc] = ...;
+ *
+ * // Set new target
+ * Error = NvRmDfsSetTarget(RmHandle, NvRmDfsClockId_Num, Target);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsFreqListCount Number of entries in the pDfsTargetFreqList array.
+ * Must be always equal to NvRmDfsClockId_Num.
+ * @param pDfsTargetFreqList Pointer to a list of target frequencies, ordered
+ * according to NvRmDfsClockId enumeration. If the list entry is set to
+ * NvRmFreqUnspecified, the current domain frequency is used as a target.
+ *
+ * @retval NvSuccess if target frequencies were updated successfully.
+ * @retval NvError_NotSupported if DFS is not stopped (disabled, or running).
+ */
+
+ NvError NvRmDfsSetTarget(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsFreqListCount,
+ const NvRmFreqKHz * pDfsTargetFreqList );
+
+/**
+ * Sets DFS high and low boundaries for CPU domain clock frequency.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ *
+ * // Set CPU envelope boundaries to LowKHz : HighKHz
+ * Error = NvRmDfsSetCpuEnvelope(RmHandle, LowKHz, HighKHz);
+ *
+ * // Change CPU envelope high boundary to HighKHz
+ * Error = NvRmDfsSetCpuEnvelope(RmHandle, NvRmFreqUnspecified, HighKHz);
+ *
+ * // Release CPU envelope back to HW limits
+ * Error = NvRmDfsSetCpuEnvelope(RmHandle, 0, NvRmFreqMaximum);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsCpuEnvelopeLowKHz Requested low boundary in kHz.
+ * @param DfsCpuEnvelopeHighKHz Requested high limit in kHz.
+ *
+ * Envelope parameters are clipped to the HW defined CPU domain range.
+ * If envelope parameter is set to NvRmFreqUnspecified, the respective
+ * CPU boundary is not modified, unless it violates the new setting for
+ * the other boundary; in the latter case both boundaries are set to the
+ * new specified value.
+ *
+ * @retval NvSuccess if DFS envelope for CPU domain was updated
+ * successfully.
+ * @retval NvError_BadValue if reversed boundaries are specified.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsSetCpuEnvelope(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsCpuLowCornerKHz,
+ NvRmFreqKHz DfsCpuHighCornerKHz );
+
+/**
+ * Sets DFS high and low boundaries for EMC domain clock frequency.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ *
+ * // Set EMC envelope boundaries to LowKHz : HighKHz
+ * Error = NvRmDfsSetEmcEnvelope(RmHandle, LowKHz, HighKHz);
+ *
+ * // Change EMC envelope high boundary to HighKHz
+ * Error = NvRmDfsSetEmcEnvelope(RmHandle, NvRmFreqUnspecified, HighKHz);
+ *
+ * // Release EMC envelope back to HW limits
+ * Error = NvRmDfsSetEmcEnvelope(RmHandle, 0, NvRmFreqMaximum);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsEmcEnvelopeLowKHz Requested low boundary in kHz.
+ * @param DfsEmcEnvelopeHighKHz Requested high limit in kHz.
+ *
+ * Envelope parameters are clipped to the ODM defined EMC configurations
+ * within HW defined EMC domain range. If envelope parameter is set to
+ * NvRmFreqUnspecified, the respective EMC boundary is not modified, unless
+ * it violates the new setting for the other boundary; in the latter case
+ * both boundaries are set to the new specified value.
+ *
+ * @retval NvSuccess if DFS envelope for EMC domain was updated
+ * successfully.
+ * @retval NvError_BadValue if reversed boundaries are specified.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsSetEmcEnvelope(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsEmcLowCornerKHz,
+ NvRmFreqKHz DfsEmcHighCornerKHz );
+
+/**
+ * Sets DFS high boundaries for CPU and EMC.
+ *
+ * @note When either CPU or EMC envelope is set via NvRmDfsSetXxxEnvelope()
+ * API, neither CPU nor EMC boundary is changed by this function.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ *
+ * // Set CPU subsystem clock limit to CpuHighKHz and Emc clock limit
+ * // to EmcHighKHz
+ * Error = NvRmDfsSetCpuEmcHighCorner(RmHandle, CpuHighKHz, EmcHighKHz);
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsCpuHighKHz Requested high boundary in kHz for CPU.
+ * @param DfsEmcHighKHz Requested high limit in kHz for EMC.
+ *
+ * Requested parameters are clipped to the respective HW defined domain
+ * ranges, as well as to ODM defined EMC configurations. If any parameter
+ * is set to NvRmFreqUnspecified, the respective boundary is not modified.
+ *
+ * @retval NvSuccess if high corner for AV subsystem was updated successfully.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsSetCpuEmcHighCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsCpuHighKHz,
+ NvRmFreqKHz DfsEmcHighKHz );
+
+/**
+ * Sets DFS high boundaries for AV subsystem clocks.
+ *
+ * Usage example:
+ *
+ * NvError Error;
+ *
+ * // Set AVP clock limit to AvpHighKHz, Vde clock limit to VpipeHighKHz,
+ * // and preserve System bus clock limit provided it is above requested
+ * // AVP and Vpipe levels.
+ * Error = NvRmDfsSetAvHighCorner(
+ * RmHandle, NvRmFreqUnspecified, AvpHighKHz, VpipeHighKHz);
+ *
+ *@note System bus clock limit must be always above AvpHighKHz, and above
+ * VpipeHighKHz. Therefore it may be adjusted up, as a result of this call,
+ * even though, it is marked unspecified.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsSysHighKHz Requested high boundary in kHz for System bus.
+ * @param DfsAvpHighKHz Requested high boundary in kHz for AVP.
+ * @param DfsVdeHighCornerKHz Requested high limit in kHz for Vde pipe.
+ *
+ * Requested parameter is clipped to the respective HW defined domain
+ * range. If parameter is set to NvRmFreqUnspecified, the respective
+ * boundary is not modified.
+ *
+ * @retval NvSuccess if high corner for AV subsystem was updated successfully.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsSetAvHighCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsSystemHighKHz,
+ NvRmFreqKHz DfsAvpHighKHz,
+ NvRmFreqKHz DfsVpipeHighKHz );
+
+/**
+ * Gets DFS profiling information.
+ *
+ * DFS profiling starts/re-starts every time NvRmDfsRunState_ProfiledLoop
+ * state is set via NvRmDfsSetState(). DFS profiling stops when any other
+ * state is set.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param DfsProfileCount Number of DFS profiles. Must be always equal to
+ * NvRmDfsProfileId_Num.
+ * @param pSamplesNoList Output storage pointer to an array of sample counts
+ * for each profile target ordered according to NvRmDfsProfileId enumeration.
+ * @param pProfileTimeUsList Output storage pointer to an array of cumulative
+ * execution time in microseconds for each profile target ordered according
+ * to NvRmDfsProfileId enumeration.
+ * @param pDfsPeriodUs Output storage pointer for average DFS sample
+ * period in microseconds.
+ *
+ * @retval NvSuccess if profile information is returned successfully.
+ * @retval NvError_NotSupported if DFS is not running in profiled loop.
+ */
+
+ NvError NvRmDfsGetProfileData(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsProfileCount,
+ NvU32 * pSamplesNoList,
+ NvU32 * pProfileTimeUsList,
+ NvU32 * pDfsPeriodUs );
+
+/**
+ * Starts/Re-starts NV DFS logging.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ */
+
+ void NvRmDfsLogStart(
+ NvRmDeviceHandle hRmDeviceHandle );
+
+/**
+ * Stops DFS logging and gets cumulative mean values of DFS domains frequencies
+ * over logging time.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param LogMeanFreqListCount Number of entries in the pLogMeanFreqList array.
+ * Must be always equal to NvRmDfsClockId_Num.
+ * @param pLogMeanFreqList Pointer to a list filled with mean values of DFS
+ * frequencies, ordered according to NvRmDfsClockId enumeration.
+ * @param pLogLp2TimeMs Pointer to a variable filled with cumulative time spent
+ * in LP2 in milliseconds.
+ * @param pLogLp2Entries Pointer to a variable filled with cumulative number of
+ * LP2 mode entries.
+ *
+ * @retval NvSuccess if mean values are returned successfully.
+ * @retval NvError_NotSupported if DFS is disabled.
+ */
+
+ NvError NvRmDfsLogGetMeanFrequencies(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 LogMeanFreqListCount,
+ NvRmFreqKHz * pLogMeanFreqList,
+ NvU32 * pLogLp2TimeMs,
+ NvU32 * pLogLp2Entries );
+
+/**
+ * Gets specified entry of the detailed DFS activity log.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param EntryIndex Log entry index.
+ * @param LogDomainsCount The size of activity arrays.
+ * Must be always equal to NvRmDfsClockId_Num.
+ * @param pIntervalMs Pointer to a variable filled with sample interval time
+ * in milliseconds.
+ * @param pLp2TimeMs Pointer to a variable filled with time spent in LP2
+ * in milliseconds.
+ * @param pActiveCyclesList Pointer to a list filled with domain active cycles
+ * within sample interval.
+ * @param pAveragesList Pointer to a list filled with average domain activity
+ * over DFS moving window.
+ * @param pFrequenciesList Pointer to a list filled with instantaneous domains
+ * frequencies.
+ * All lists are ordered according to NvRmDfsClockId enumeration.
+ *
+ * @retval NvSuccess if log entry is retrieved successfully.
+ * @retval NvError_InvalidAddress if requested entry is empty.
+ * @retval NvError_NotSupported if DFS is disabled, or detailed logging
+ * is not supported.
+ */
+
+ NvError NvRmDfsLogActivityGetEntry(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 EntryIndex,
+ NvU32 LogDomainsCount,
+ NvU32 * pIntervalMs,
+ NvU32 * pLp2TimeMs,
+ NvU32 * pActiveCyclesList,
+ NvRmFreqKHz * pAveragesList,
+ NvRmFreqKHz * pFrequenciesList );
+
+/**
+ * Gets specified entry of the detailed DFS starvation hints log.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param EntryIndex Log entry index.
+ * @param pSampleIndex Pointer to a variable filled with the sample interval
+ * index in the activity log with which this hint is associated.
+ * @param pStarvationHint Pointer to a variable filled with starvation
+ * hint record.
+ *
+ * @retval NvSuccess if next entry is retrieved successfully.
+ * @retval NvError_InvalidAddress if requested entry is empty.
+ * @retval NvError_NotSupported if DFS is disabled, or detailed logging
+ * is not supported.
+ */
+
+ NvError NvRmDfsLogStarvationGetEntry(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 EntryIndex,
+ NvU32 * pSampleIndex,
+ NvU32 * pClientId,
+ NvU32 * pClientTag,
+ NvRmDfsStarvationHint * pStarvationHint );
+
+/**
+ * Gets specified entry of the detailed DFS busy hints log.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param EntryIndex Log entry index.
+ * @param pSampleIndex Pointer to a variable filled with the sample interval
+ * index in the activity log with which this hint is associated.
+ * @param pBusyHint Pointer to a variable filled with busy
+ * hint record.
+ *
+ * @retval NvSuccess if next entry is retrieved successfully.
+ * @retval NvError_InvalidAddress if requested entry is empty.
+ * @retval NvError_NotSupported if DFS is disabled, or detailed logging
+ * is not supported.
+ */
+
+ NvError NvRmDfsLogBusyGetEntry(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 EntryIndex,
+ NvU32 * pSampleIndex,
+ NvU32 * pClientId,
+ NvU32 * pClientTag,
+ NvRmDfsBusyHint * pBusyHint );
+
+/**
+ * Gets low threshold and present voltage on the given rail.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param RailId The targeted voltage rail ID.
+ * @param pLowMv Output storage pointer for low voltage threshold (in
+ * millivolt). NvRmVoltsUnspecified is returned if targeted rail does
+ * not exist on SoC.
+ * @param pPresentMv Output storage pointer for present rail voltage (in
+ * millivolt). NvRmVoltsUnspecified is returned if targeted rail does
+ * not exist on SoC.
+ */
+
+ void NvRmDfsGetLowVoltageThreshold(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts * pLowMv,
+ NvRmMilliVolts * pPresentMv );
+
+/**
+ * Sets low threshold for the given rail. The actual rail voltage is scaled
+ * to match SoC clock frequencies, but not below the specified threshold.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param RailId The targeted voltage rail ID.
+ * @param LowMv Low voltage threshold (in millivolts) for the targeted rail.
+ * Ignored if targeted rail does not exist on SoC.
+ */
+
+ void NvRmDfsSetLowVoltageThreshold(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts LowMv );
+
+/**
+ * Notifies RM Kernel about entering Suspend state.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @retval NvSuccess if notifying RM entering Suspend state successfully.
+ */
+
+ NvError NvRmKernelPowerSuspend(
+ NvRmDeviceHandle hRmDeviceHandle );
+
+/**
+ * Notifies RM kernel about entering Resume state.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @retval NvSuccess if notifying RM entering Resume state successfully.
+ */
+
+ NvError NvRmKernelPowerResume(
+ NvRmDeviceHandle hRmDeviceHandle );
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_pwm.h b/arch/arm/mach-tegra/include/nvrm_pwm.h
new file mode 100644
index 000000000000..d1011dc77439
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_pwm.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_pwm_H
+#define INCLUDED_nvrm_pwm_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_pinmux.h"
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvos.h"
+#include "nvcommon.h"
+
+/**
+ * NvRmPwmHandle is an opaque handle to the NvRmPwmStructRec interface
+ */
+
+typedef struct NvRmPwmRec *NvRmPwmHandle;
+
+/**
+ * Defines possible PWM modes.
+ */
+
+typedef enum
+{
+
+ /// Specifies Pwm disable mode
+ NvRmPwmMode_Disable = 1,
+
+ /// Specifies Pwm enable mode
+ NvRmPwmMode_Enable,
+
+ /// Specifies Blink LED enabled mode
+ NvRmPwmMode_Blink_LED,
+
+ /// Specifies Blink output 32KHz clock enable mode
+ NvRmPwmMode_Blink_32KHzClockOutput,
+
+ /// Specifies Blink disabled mode
+ NvRmPwmMode_Blink_Disable,
+ NvRmPwmMode_Num,
+ NvRmPwmMode_Force32 = 0x7FFFFFFF
+} NvRmPwmMode;
+
+/**
+ * Defines the possible PWM output pin
+ */
+
+typedef enum
+{
+
+ /// Specifies PWM Output-0
+ NvRmPwmOutputId_PWM0 = 1,
+
+ /// Specifies PWM Output-1
+ NvRmPwmOutputId_PWM1,
+
+ /// Specifies PWM Output-2
+ NvRmPwmOutputId_PWM2,
+
+ /// Specifies PWM Output-3
+ NvRmPwmOutputId_PWM3,
+
+ /// Specifies PMC Blink LED
+ NvRmPwmOutputId_Blink,
+ NvRmPwmOutputId_Num,
+ NvRmPwmOutputId_Force32 = 0x7FFFFFFF
+} NvRmPwmOutputId;
+
+/**
+ * @brief Initializes and opens the pwm channel. This function allocates the
+ * handle for the pwm channel and provides it to the client.
+ *
+ * Assert encountered in debug mode if passed parameter is invalid.
+ *
+ * @param hDevice Handle to the Rm device which is required by Rm to acquire
+ * the resources from RM.
+ * @param phPwm Points to the location where the Pwm handle shall be stored.
+ *
+ * @retval NvSuccess Indicates that the Pwm channel has successfully opened.
+ * @retval NvError_InsufficientMemory Indicates that function fails to allocate
+ * the memory.
+ * @retval NvError_NotInitialized Indicates the Pwm initialization failed.
+ */
+
+ NvError NvRmPwmOpen(
+ NvRmDeviceHandle hDevice,
+ NvRmPwmHandle * phPwm );
+
+/**
+ * @brief Closes the Pwm channel. This function frees the memory allocated for
+ * the pwm handle for the pwm channel.
+ * This function de-initializes the pwm channel. This API never fails.
+ *
+ * @param hPwm A handle from NvRmPwmOpen(). If hPwm is NULL, this API does
+ * nothing.
+ */
+
+ void NvRmPwmClose(
+ NvRmPwmHandle hPwm );
+
+/**
+ * @brief Configure PWM module as disable/enable. Also, it is used
+ * to set the PWM duty cycle and frequency. Beside that, it is
+ * used to configure PMC' blinking LED if OutputId is NvRmPwmOutputId_Blink
+ *
+ * @param hPwm Handle to the PWM channel.
+ * @param OutputId The output pin to configure. Allowed OutputId values are
+ * defined in ::NvRmPwmOutputId
+ * @param Mode The mode type to config. Allowed mode values are
+ * defined in ::NvRmPwmMode
+ * @param DutyCycle The duty cycle is an unsigned 15.16 fixed point
+ * value that represents PWM duty cycle in percentage range from
+ * 0.00 to 100.00. For example, 10.5 percentage duty cycle would be
+ * represented as 0x000A8000. This parameter is ignored if NvRmPwmMode
+ * is NvRmPwmMode_Blink_32KHzClockOutput or NvRmPwmMode_Blink_Disable
+ * @param RequestedFreqHzOrPeriod The requested frequency in Hz or Period
+ * A requested frequency value beyond the max supported value will be
+ * clamped to the max supported value.
+ * If PMC Blink LED is used, this parameter is represented as
+ * request period time in second unit. This parameter is ignored if
+ * NvRmPwmMode is NvRmPwmMode_Blink_32KHzClockOutput or
+ * NvRmPwmMode_Blink_Disable
+ *
+ * @param pCurrentFreqHzOrPeriod Pointer to the returned frequency of
+ * that mode. If the PMC Blink LED is used, it points to the returned
+ * period time instead. This parameter is ignored if NvRmPwmMode
+ * is NvRmPwmMode_Blink_32KHzClockOutput or NvRmPwmMode_Blink_Disable
+ *
+ * @retval NvSuccess Indicates the configuration succeeded.
+ */
+
+ NvError NvRmPwmConfig(
+ NvRmPwmHandle hPwm,
+ NvRmPwmOutputId OutputId,
+ NvRmPwmMode Mode,
+ NvU32 DutyCycle,
+ NvU32 RequestedFreqHzOrPeriod,
+ NvU32 * pCurrentFreqHzOrPeriod );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_rmctrace.h b/arch/arm/mach-tegra/include/nvrm_rmctrace.h
new file mode 100644
index 000000000000..22a9eb45eb71
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_rmctrace.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_RMCTRACE_H
+#define INCLUDED_NVRM_RMCTRACE_H
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_init.h"
+
+/**
+ * RMC is a file format for capturing accesses to hardware, both memory
+ * and register, that may be played back against a simulator. Drivers
+ * are expected to emit RMC tracing if RMC tracing is enabled.
+ *
+ * The RM will already have an RMC file open before any drivers are expected
+ * to access it, so it is not necessary for NvRmRmcOpen or Close to be called
+ * by anyone except the RM itself (but drivers may want to if capturing a
+ * subset of commands is useful).
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+#if !defined(NV_OAL)
+#define NV_OAL 0
+#endif
+
+// FIXME: better rmc compile time macros
+#if !defined(NV_DEF_RMC_TRACE)
+#if NV_DEBUG && !NV_OAL
+#define NV_DEF_RMC_TRACE 1
+#else
+#define NV_DEF_RMC_TRACE 0
+#endif
+#endif
+
+/**
+ * exposed structure for RMC files.
+ */
+typedef struct NvRmRMCFile_t
+{
+ NvOsFileHandle file;
+ NvBool enable; /* enable bit for writes */
+} NvRmRmcFile;
+
+/**
+ * opens the an RMC file.
+ *
+ * @param name The name of the rmc file
+ * @param rmc Out param - the opened rmc file (if successful)
+ *
+ * NvOsFile* operations should not be used directly since RMC commands
+ * or comments may be emitted to the file on open/close/etc.
+ */
+NvError
+NvRmRmcOpen( const char *name, NvRmRmcFile *rmc );
+
+/**
+ * closes an RMC file.
+ *
+ * @param rmc The rmc file to close.
+ */
+void
+NvRmRmcClose( NvRmRmcFile *rmc );
+
+/**
+ * emits a string to the RMC file.
+ *
+ * @param file The RMC file
+ * @param format Printf style argument format string
+ *
+ * NvRmRmcOpen must be called before this function.
+ *
+ * This function should be called via a macro so that it may be compiled out.
+ * Note that double parens will be needed:
+ *
+ * NVRM_RMC_TRACE(( file, "# filling memory with stuff\n" ));
+ */
+void
+NvRmRmcTrace( NvRmRmcFile *rmc, const char *format, ... );
+
+/**
+ * retrieves the RM's global RMC file.
+ *
+ * @param hDevice The RM instance
+ * @param file Output param: the RMC file
+ */
+NvError
+NvRmGetRmcFile( NvRmDeviceHandle hDevice, NvRmRmcFile **file );
+
+#if NV_DEF_RMC_TRACE
+#define NVRM_RMC_TRACE(a) NvRmRmcTrace a
+/**
+ * enable or disable RMC tracing at runtime.
+ *
+ * @param file The RMC file
+ * @param enable Either enable or disable rmc tracing
+ */
+#define NVRM_RMC_ENABLE(f, e) \
+ ((f)->enable = (e))
+
+#define NVRM_RMC_IS_ENABLED(f) \
+ ((f)->enable != 0)
+
+#else
+#define NVRM_RMC_TRACE(a) (void)0
+#define NVRM_RMC_ENABLE(f,e) (void)0
+#define NVRM_RMC_IS_ENABLED(f) (void)0
+#endif
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* NVRM_RMCTRACE_H */
diff --git a/arch/arm/mach-tegra/include/nvrm_spi.h b/arch/arm/mach-tegra/include/nvrm_spi.h
new file mode 100644
index 000000000000..e5bee1e1bd1c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/nvrm_spi.h
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_nvrm_spi_H
+#define INCLUDED_nvrm_spi_H
+
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "nvrm_pinmux.h"
+#include "nvrm_module.h"
+#include "nvrm_init.h"
+
+#include "nvcommon.h"
+
+/**
+ * NvRmSpiHandle is an opaque context to the NvRmSpiRec interface.
+ */
+
+typedef struct NvRmSpiRec *NvRmSpiHandle;
+
+/**
+ * Open the handle for the spi/sflash controller. This API initializes the
+ * sflash/spi controller.
+ * The Instance Id for the sflash and spi controller start from 0.
+ * The handle for the spi/sflash is open in master and slave mode based on the
+ * parameter passed. If the spi handle is opened in master mode the the SPICLK
+ * is generated from the spi controller and it acts like a master for all the
+ * transaction.
+ *
+ * If the spi handle is opened in master mode then the controller can be shared
+ * between different chip select client but if the spi handle is created in the
+ * slave mode then it can not be shared by other client and only one client is
+ * allowed to open the spi handle for the slave mode.
+ *
+ * Assert encountered in debug mode if invalid parameter passed.
+ *
+ * @param hRmDevice Handle to the Rm device.
+ * @param IoModule The Rm IO module to set whether this is the
+ * NvOdmIoModule_Sflash or NvOdmIoModule_Slink or NvOdmIoModule_Spi.
+ * @param InstanceId The Instance Id which starts from the 0.
+ * @param IsMasterMode Tells whether the controller will be open in master mode
+ * or the slave mode?
+ * @param phRmSpi Pointer to the sflash/spi handle where the allocated handle
+ * will be stored.
+ *
+ * @retval NvSuccess Indicates the function is successfully completed
+ * @retval NvError_MemoryMappingFail Indicates the address mapping of the
+ * register failed.
+ * @retval NvError_InsufficientMemory Indicates that memory allocation is
+ * failed.
+ * @retval NvError_NotSupported Indicates that the spi is not supported.
+ * @retval NvError_AlreadyAllocated Indicates that the spi handle is already
+ * allocated to the other slave client.
+ */
+
+ NvError NvRmSpiOpen(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 IoModule,
+ NvU32 InstanceId,
+ NvBool IsMasterMode,
+ NvRmSpiHandle * phRmSpi );
+
+/**
+ * Deinitialize the spi controller, disable the clock and release the spi
+ * handle.
+ *
+ * @param hRmSpi A handle from NvRmSpiOpen(). If hRmSpi is NULL, this API does
+ * nothing.
+ */
+
+ void NvRmSpiClose(
+ NvRmSpiHandle hRmSpi );
+
+/**
+ * Performs an Spi controller read and write simultaneously in master mode.
+ * This apis is only supported if the handle is open in master mode.
+ *
+ * Every Spi transaction is by definition a simultaneous read and write transaction, so
+ * there are no separate APIs for read versus write. However, if you only need
+ * to do a read or write, this API allows you to declare that you are not
+ * interested in the read data, or that the write data is not of interest.
+ * If only read is required then client can pass the NULL pointer to the
+ * pWriteBuffer. Zeros will be sent in this case.
+ * Similarly, if client wants to send data only then he can pass the
+ * pReadBuffer as NULL.
+ * If Read and write is required and he wants to first send the command and
+ * then want to read the response, then he need to send both the valid pointer
+ * read and write. In this case the bytesRequested will be the sum of the
+ * send command size and response size. The size of the pReadBuffer and
+ * pWriteBuffer should be equal to the bytes requested.
+ * E.g. Client want to send the 4byte command first and the wants to read the
+ * 4 byte response, then he need a 8 byte pWriteBuffer and 8 byte pReadBuffer.
+ * He will fill the first 4 byte of pWriteBuffer with the command which he
+ * wants to send. After calling this api, he needs to ignore the first 4 bytes
+ * and use the next 4 byte as valid response data in the pReadBuffer.
+ *
+ * This is a blocking API. It will returns when all the data has been transferred
+ * over the pins of the SOC (the transaction).
+ *
+ * Several Spi transactions may be performed in a single call to this API, but
+ * only if all of the transactions are to the same chip select and have the same
+ * packet size.
+ *
+ * Transaction sizes from 1 to 32 bits are supported. However, all of the
+ * packets are byte-aligned in memory. Like, if packetBitLength is 12 bit
+ * then client needs the 2 byte for the 1 packet. New packets start from the
+ * new bytes e.g. byte0 and byte1 contain the first packet and byte2 and byte3
+ * will contain the second packets.
+ *
+ * To perform one transaction, the BytesRequested argument should be:
+ *
+ * (PacketSizeInBits + 7)/8
+ *
+ * To perform n transactions, BytesRequested should be:
+ *
+ * n*((PacketSizeInBits + 7)/8)
+ *
+ * Within a given
+ * transaction with the packet size larger than 8 bits, the bytes are stored in
+ * order of the MSB (most significant byte) first.
+ * The Packet is formed with the first Byte will be in MSB and then next byte
+ * will be in the next MSB towards the LSB.
+ *
+ * For the example, if One packet need to be send and its size is the 20 bit
+ * then it will require the 3 bytes in the pWriteBuffer and arrangement of the
+ * data are as follows:
+ * The packet is 0x000ABCDE (Packet with length of 20 bit).
+ * pWriteBuff[0] = 0x0A
+ * pWriteBuff[1] = 0xBC
+ * pWriteBuff[2] = 0xDE
+ *
+ * The most significant bit will be transmitted first i.e. bit20 is transmitted
+ * first and bit 0 will be transmitted last.
+ *
+ * If the transmitted packet (command + receive data) is more than 32 like 33 and
+ * want to transfer in the single call (CS should be active) then it can be transmitted
+ * in following way:
+ * The transfer is command(8 bit)+Dummy(1bit)+Read (24 bit) = 33 bit of transfer.
+ * - Send 33 bit as 33 byte and each byte have the 1 valid bit, So packet bit length = 1 and
+ * bytes requested = 33.
+ * NvU8 pSendData[33], pRecData[33];
+ * pSendData[0] = (Command >>7) & 0x1;
+ * pSendData[1] = (Command >> 6)& 0x1;
+ * ::::::::::::::
+ * pSendData[8] = DummyBit;
+ * pSendData[9] to pSendData[32] = 0;
+ * Call NvRmSpiTransaction(hRmSpi,SpiPinMap,ChipSelect,ClockSpeedInKHz,pRecData, pSendData, 33,1);
+ * Now You will get the read data from pRecData[9] to pRecData[32] on bit 0 on each byte.
+ *
+ * - The 33 bit transfer can be also done as 11 byte and each byte have the 3 valid bits.
+ * This need to rearrange the command in the pSendData in such a way that each byte have the
+ * 3 valid bits.
+ * NvU8 pSendData[11], pRecData[11];
+ * pSendData[0] = (Command >>4) & 0x7;
+ * pSendData[1] = (Command >> 1)& 0x7;
+ * pSendData[2] = (((Command)& 0x3) <<1) | DummyBit;
+ * pSendData[3] to pSendData[10] = 0;
+ *
+ * Call NvRmSpiTransaction(hRmSpi,SpiPinMap,ChipSelect,ClockSpeedInKHz,pRecData, pSendData, 11,3);
+ * Now You will get the read data from pRecData[4] to pRecData[10] on lower 3 bits on each byte.
+ *
+ * Similarly the 33 bit transfer can also be done as 6 byte and each 2 bytes contain the 11 valid bits.
+ * Call NvRmSpiTransaction(hRmSpi,SpiPinMap,ChipSelect,ClockSpeedInKHz,pRecData, pSendData, 6,11);
+ *
+ * pReadBuffer and pWriteBuffer may be the same pointer, in which case the
+ * write data is destroyed as we read in the read data. Unless they are
+ * identical pointers, however, pReadBuffer and pWriteBuffer must not overlap.
+ *
+ * @param hOdmSpi The Spi handle allocated in a call to NvOdmSpiOpen().
+ * @param SpiPinMap For SPI master-mode controllers which are being multiplexed across
+ * multiple pin mux configurations, this specifies which pin mux configuration
+ * should be used for the transaction. Must be 0 when the ODM pin mux query
+ * specifies a non-multiplexed configuration for the controller.
+ * @param ChipSelectId The chip select Id on which device is connected.
+ * @param ClockSpeedInKHz The clock speed in KHz on which device can communicate.
+ * @param pReadBuffer A pointer to buffer to be filled in with read data. If this
+ * pointer is NULL, the read data will be discarded.
+ * @param pWriteBuffer A pointer to a buffer from which to obtain write data. If this
+ * pointer is NULL, the write data will be all zeros.
+ * @param BytesRequested The size of pReadBuffer and pWriteBuffer buffers in bytes.
+ * @param PacketSizeInBits The packet size in bits of each Spi transaction.
+ *
+ */
+
+ void NvRmSpiTransaction(
+ NvRmSpiHandle hRmSpi,
+ NvU32 SpiPinMap,
+ NvU32 ChipSelectId,
+ NvU32 ClockSpeedInKHz,
+ NvU8 * pReadBuffer,
+ NvU8 * pWriteBuffer,
+ NvU32 BytesRequested,
+ NvU32 PacketSizeInBits );
+
+/**
+ * Start an Spi controller read and write simultaneously in the slave mode.
+ * This API is only supported for the spi handle which is opened in slave mode.
+ *
+ * This API will assert if opened spi handle is the master type.
+ *
+ * Every Spi transaction is by definition a simultaneous read and write
+ * transaction, so there are no separate APIs for read versus write.
+ * However, if you only need to start a read or write transaction, this API
+ * allows you to declare that you are not interested in the read data,
+ * or that the write data is not of interest.
+ * If only read is required to start then client can pass NV_TRUE to the the
+ * IsReadTransfer and NULL pointer to the pWriteBuffer. The state of the dataout
+ * will be set by IsIdleDataOutHigh of the structure NvOdmQuerySpiIdleSignalState
+ * in nvodm_query.h.
+ * Similarly, if client wants to send data only then he can pass NV_FALSE to the
+ * IsReadTransfer.
+ *
+ * This is a nonblocking API. This api start the data transfer and returns to the
+ * caller without waiting for the data transfer completion.
+ *
+ * Transaction sizes from 1 to 32 bits are supported. However, all of the
+ * packets are byte-aligned in memory. Like, if packetBitLength is 12 bit
+ * then client needs the 2 byte for the 1 packet. New packets start from the
+ * new bytes e.g. byte0 and byte1 contain the first packet and byte2 and byte3
+ * will contain the second packets.
+ *
+ * To perform one transaction, the BytesRequested argument should be:
+ *
+ * (PacketSizeInBits + 7)/8
+ *
+ * To perform n transactions, BytesRequested should be:
+ *
+ * n*((PacketSizeInBits + 7)/8)
+ *
+ * Within a given
+ * transaction with the packet size larger than 8 bits, the bytes are stored in
+ * order of the LSB (least significant byte) first.
+ * The Packet is formed with the first Byte will be in LSB and then next byte
+ * will be in the next LSB towards the MSB.
+ *
+ * For the example, if One packet need to be send and its size is the 20 bit
+ * then it will require the 3 bytes in the pWriteBuffer and arrangement of the
+ * data are as follows:
+ * The packet is 0x000ABCDE (Packet with length of 20 bit).
+ * pWriteBuff[0] = 0xDE
+ * pWriteBuff[1] = 0xBC
+ * pWriteBuff[2] = 0x0A
+ *
+ * The most significant bit will be transmitted first i.e. bit20 is transmitted
+ * first and bit 0 will be transmitted last.
+ *
+ * @see NvRmSpiGetTransactionData
+ * Typical usecase for the CAIF interface. The step for doing the transfer is:
+ * 1. ACPU calls the NvRmSpiStartTransaction() to configure the spi controller
+ * to set in the receive or transmit mode and make ready for the data transfer.
+ * 2. ACPU then send the signal to the CCPU to send the SPICLK (by activating
+ * the SPI_INT) and start the transaction. CCPU get this signal and start sending
+ * SPICLK.
+ * 3. ACPU will call the NvRmSpiGetTransactionData() to get the data/information
+ * about the transaction.
+ * 4. After completion of the transfer ACPU inactivate the SPI_INT.
+ *
+ * @param hOdmSpi The Spi handle allocated in a call to NvOdmSpiOpen().
+ * @param ChipSelectId The chip select Id on which device is connected.
+ * @param ClockSpeedInKHz The clock speed in KHz on which device can communicate.
+ * @param IsReadTransfer It tells that whether the read transfer is required or
+ * not. If it is NV_TRUE then read transfer is required and the read data will be
+ * available in the local buffer of the driver. The client will get the received
+ * data after calling the NvRmSpiGetTransactionData().
+ * @param pWriteBuffer A pointer to a buffer from which to obtain write data. If this
+ * pointer is NULL, the write data will be all zeros.
+ * @param BytesRequested The size of pReadBuffer and pWriteBuffer buffers in bytes.
+ * @param PacketSizeInBits The packet size in bits of each Spi transaction.
+ *
+ */
+
+ NvError NvRmSpiStartTransaction(
+ NvRmSpiHandle hRmSpi,
+ NvU32 ChipSelectId,
+ NvU32 ClockSpeedInKHz,
+ NvBool IsReadTransfer,
+ NvU8 * pWriteBuffer,
+ NvU32 BytesRequested,
+ NvU32 PacketSizeInBits );
+
+/**
+ * Get the spi transaction status that is started for the slave mode and wait
+ * if required till the transfer completes for a given timeout error.
+ * If read transaction has been started then it will return the receive data to
+ * the client.
+ *
+ * This is a blocking API and wait for the data transfer completion till the
+ * data requested transfer completes or the timeout happen.
+ *
+ * @see NvRmSpiStartTransaction
+ *
+ * @param hOdmSpi The Spi handle allocated in a call to NvOdmSpiOpen().
+ * @param pReadBuffer A pointer to buffer to be filled in with read data. If this
+ * pointer is NULL, the read data will be discarded.
+ * @param BytesRequested The size of pReadBuffer and pWriteBuffer buffers in bytes.
+ * @param BytesTransfererd The number of bytes transferred.
+ * @param WaitTimeout The timeout in milliseconds to wait for the transaction to be
+ * completed.
+ *
+ * @retval NvSuccess Indicates that the operation succeeded.
+ * @retval NvError_Timeout Indicates that the timeout happen.
+ * @retval NvError_InvalidState Indicates that the transfer has not been started.
+ *
+ */
+
+ NvError NvRmSpiGetTransactionData(
+ NvRmSpiHandle hRmSpi,
+ NvU8 * pReadBuffer,
+ NvU32 BytesRequested,
+ NvU32 * pBytesTransfererd,
+ NvU32 WaitTimeout );
+
+/**
+ * Set the signal mode for the spi communication for a given chip select.
+ * After calling this API, the further communication happen with the new
+ * configured signal modes.
+ * The default value of the signal mode is taken from nvodm query and this
+ * api will override the signal mode which is read from query.
+ *
+ * @see NvRmSpiStartTransaction
+ *
+ * @param hOdmSpi The Spi handle allocated in a call to NvOdmSpiOpen().
+ * @param ChipSelectId The chip select Id on which device is connected.
+ * @param SpiSignalMode The nvodm signal modes which need to be set.
+ *
+ */
+
+ void NvRmSpiSetSignalMode(
+ NvRmSpiHandle hRmSpi,
+ NvU32 ChipSelectId,
+ NvU32 SpiSignalMode );
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/Makefile b/arch/arm/mach-tegra/nvrm/Makefile
new file mode 100644
index 000000000000..8f325d6f17a1
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/Makefile
@@ -0,0 +1,18 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += core/ap15/
+obj-y += core/ap20/
+obj-y += core/common/
+
+obj-y += io/common/
+obj-y += io/ap15/
+obj-y += io/ap20/
+
+obj-y += dispatch/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/Makefile b/arch/arm/mach-tegra/nvrm/core/ap15/Makefile
new file mode 100644
index 000000000000..82ad2b2208eb
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/Makefile
@@ -0,0 +1,30 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += ap15rm_interrupt_generic.o
+obj-y += ap15rm_hwmap.o
+obj-y += ap15rm_gart.o
+obj-y += ap15rm_clocks.o
+obj-y += ap15rm_clock_config.o
+obj-y += ap15rm_clocks_info.o
+obj-y += nvrm_clocks.o
+obj-y += ap15rm_pinmux_tables.o
+obj-y += ap16rm_pinmux_tables.o
+obj-y += ap15rm_power.o
+obj-y += ap15rm_power_dfs.o
+obj-y += ap15rm_power_oalintf.o
+obj-y += ap15rm_clock_misc.o
+obj-y += ap15rm_memctrl.o
+obj-y += ap15rm_fuse.o
+obj-y += nvrm_diag.o
+obj-y += ap15rm_reloctable.o
+obj-y += ap16rm_reloctable.o
+obj-y += ap15rm_init.o
+obj-y += ap15rm_init_common.o
+obj-y += ap15rm_interrupt.o
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_config.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_config.c
new file mode 100644
index 000000000000..198a2b20ce3d
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_config.c
@@ -0,0 +1,2689 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvrm_clocks.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_module.h"
+#include "nvrm_drf.h"
+#include "ap15/aremc.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/arapb_misc.h"
+#include "ap15rm_clocks.h"
+#include "ap15rm_private.h"
+#include "nvrm_pmu_private.h"
+#include "nvodm_query_discovery.h"
+#include "nvodm_query_memc.h"
+#include "ap20/ap20rm_clocks.h"
+
+// TODO: CAR and EMC access macros for time critical access
+
+/*****************************************************************************/
+
+static const NvU32 s_Ap15OscFreqKHz[] = { 13000, 19200, 12000, 26000 };
+
+static void
+Ap15PllPConfigure(NvRmDeviceHandle hRmDevice);
+
+static void
+Ap15MioReconfigure(NvRmDeviceHandle hRmDevice, NvRmFreqKHz MioKHz);
+
+static void
+Ap15AudioSyncInit(NvRmDeviceHandle hRmDevice, NvRmFreqKHz AudioSyncKHz);
+
+static NvError
+NvRmPrivOscDoublerConfigure(NvRmDeviceHandle hRmDevice, NvRmFreqKHz OscKHz)
+{
+ switch (hRmDevice->ChipId.Id)
+ {
+ case 0x15:
+ case 0x16:
+ return NvRmPrivAp15OscDoublerConfigure(hRmDevice, OscKHz);
+ case 0x20:
+ return NvRmPrivAp20OscDoublerConfigure(hRmDevice, OscKHz);
+ default:
+ NV_ASSERT(!"Unsupported chip ID");
+ return NvError_NotSupported;
+ }
+}
+
+void
+NvRmPrivClockSourceFreqInit(
+ NvRmDeviceHandle hRmDevice,
+ NvU32* pClockSourceFreq)
+{
+ NvU32 reg;
+ const NvRmCoreClockInfo* pCore = NULL;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pClockSourceFreq);
+
+ /*
+ * Fixed clock sources: 32kHz, main oscillator and doubler
+ * (OSC control should be already configured by the boot code)
+ */
+ pClockSourceFreq[NvRmClockSource_ClkS] = 32;
+
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_OSC_CTRL_0);
+ pClockSourceFreq[NvRmClockSource_ClkM] =
+ s_Ap15OscFreqKHz[NV_DRF_VAL(CLK_RST_CONTROLLER, OSC_CTRL, OSC_FREQ, reg)];
+
+ if (NvSuccess == NvRmPrivOscDoublerConfigure(
+ hRmDevice, pClockSourceFreq[NvRmClockSource_ClkM]))
+ {
+ pClockSourceFreq[NvRmClockSource_ClkD] =
+ pClockSourceFreq[NvRmClockSource_ClkM] << 1;
+ }
+ else
+ pClockSourceFreq[NvRmClockSource_ClkD] = 0;
+
+ /*
+ * PLLs and secondary PLL dividers
+ */
+ #define INIT_PLL_FREQ(PllId) \
+ do\
+ {\
+ pClockSourceFreq[NvRmClockSource_##PllId] = NvRmPrivAp15PllFreqGet( \
+ hRmDevice, NvRmPrivGetClockSourceHandle(NvRmClockSource_##PllId)->pInfo.pPll); \
+ } while(0)
+
+ // PLLX (check if present, keep boot settings
+ // and just init frequency table)
+ if (NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0))
+ {
+ INIT_PLL_FREQ(PllX0);
+ }
+ // PLLC with output divider (if enabled keep boot settings and just init
+ // frequency table, if disabled or bypassed - configure)
+ INIT_PLL_FREQ(PllC0);
+ if (pClockSourceFreq[NvRmClockSource_PllC0] <=
+ pClockSourceFreq[NvRmClockSource_ClkM])
+ {
+ NvRmFreqKHz f = NVRM_PLLC_DEFAULT_FREQ_KHZ;
+ NvRmPrivAp15PllConfigureSimple(hRmDevice, NvRmClockSource_PllC0, f, &f);
+ }
+ pClockSourceFreq[NvRmClockSource_PllC1] = NvRmPrivDividerFreqGet(hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllC1)->pInfo.pDivider);
+
+ // PLLM with output divider (keep boot settings
+ // and just init frequency)
+ INIT_PLL_FREQ(PllM0);
+ pClockSourceFreq[NvRmClockSource_PllM1] = NvRmPrivDividerFreqGet(hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllM1)->pInfo.pDivider);
+#if !NV_OAL
+ // PLLD and PLLU with no output dividers (keep boot settings
+ // and just init frequency table)
+ INIT_PLL_FREQ(PllD0);
+ INIT_PLL_FREQ(PllU0);
+#endif
+
+ // PLLP and output dividers: set PLLP fixed frequency and enable dividers
+ // with fixed settings in override mode, so they can be changed later, as
+ // necessary. Switch system clock to oscillator during PLLP reconfiguration
+ INIT_PLL_FREQ(PllP0);
+ if (pClockSourceFreq[NvRmClockSource_PllP0] != NVRM_PLLP_FIXED_FREQ_KHZ)
+ {
+ pCore = NvRmPrivGetClockSourceHandle(
+ NvRmClockSource_SystemBus)->pInfo.pCore;
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCore->SelectorOffset);
+ NvRmPrivCoreClockSet(hRmDevice, pCore, NvRmClockSource_ClkM, 0, 0);
+ Ap15PllPConfigure(hRmDevice);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCore->SelectorOffset, reg);
+ }
+ NV_ASSERT(pClockSourceFreq[NvRmClockSource_PllP0] == NVRM_PLLP_FIXED_FREQ_KHZ);
+ NvRmPrivDividerSet(
+ hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP1)->pInfo.pDivider,
+ NVRM_FIXED_PLLP1_SETTING);
+ NvRmPrivDividerSet(
+ hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP2)->pInfo.pDivider,
+ NVRM_FIXED_PLLP2_SETTING);
+ NvRmPrivDividerSet(
+ hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP3)->pInfo.pDivider,
+ NVRM_FIXED_PLLP3_SETTING);
+ NvRmPrivDividerSet(
+ hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP4)->pInfo.pDivider,
+ NVRM_FIXED_PLLP4_SETTING);
+
+ // PLLA and output divider must be init after PLLP1, used as a
+ // reference (keep boot settings and just init frequency table)
+ INIT_PLL_FREQ(PllA1);
+ pClockSourceFreq[NvRmClockSource_PllA0] = NvRmPrivDividerFreqGet(hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllA0)->pInfo.pDivider);
+
+ #undef INIT_PLL_FREQ
+
+ /*
+ * Core and bus clock sources
+ * - Leave CPU bus as set by boot-loader
+ * - Leave System bus as set by boot-loader, make sure all bus dividers are 1:1
+ */
+ pCore = NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+ pClockSourceFreq[NvRmClockSource_CpuBus] =
+ NvRmPrivCoreClockFreqGet(hRmDevice, pCore);
+ if (NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge))
+ {
+ pClockSourceFreq[NvRmClockSource_CpuBridge] = NvRmPrivDividerFreqGet(hRmDevice,
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge)->pInfo.pDivider);
+ }
+ pCore = NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+ pClockSourceFreq[NvRmClockSource_SystemBus] =
+ NvRmPrivCoreClockFreqGet(hRmDevice, pCore);
+ NvRmPrivBusClockInit(
+ hRmDevice, pClockSourceFreq[NvRmClockSource_SystemBus]);
+
+ /*
+ * Initialize AudioSync clocks (PLLA will be re-configured if necessary)
+ */
+ Ap15AudioSyncInit(hRmDevice, NVRM_AUDIO_SYNC_KHZ);
+}
+
+void
+NvRmPrivBusClockInit(NvRmDeviceHandle hRmDevice, NvRmFreqKHz SystemFreq)
+{
+ /*
+ * Set all bus clock frequencies equal to the system clock frequency,
+ * and clear AVP clock skipper i.e., set all bus clock dividers 1:1.
+ * If APB clock is limited below system clock for a particular SoC,
+ * set the APB divider to satisfy this limitation.
+ */
+ NvRmFreqKHz AhbFreq, ApbFreq;
+ NvRmFreqKHz ApbMaxFreq = SystemFreq;
+ if (hRmDevice->ChipId.Id == 0x20)
+ {
+ ApbMaxFreq = NVRM_AP20_APB_MAX_KHZ; // AP20 limitation
+ }
+ AhbFreq = SystemFreq;
+ ApbFreq = NV_MIN(SystemFreq, ApbMaxFreq);
+
+ NvRmPrivBusClockFreqSet(
+ hRmDevice, SystemFreq, &SystemFreq, &AhbFreq, &ApbFreq, ApbMaxFreq);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_COP_CLK_SKIP_POLICY_0, 0x0);
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+static const NvRmFreqKHz s_PllLpCpconSelectionTable[] =
+{
+ NVRM_PLL_LP_CPCON_SELECT_STEPS_KHZ
+};
+static const NvU32 s_PllLpCpconSelectionTableSize =
+NV_ARRAY_SIZE(s_PllLpCpconSelectionTable);
+
+static const NvU32 s_PllMipiCpconSelectionTable[] =
+{
+ NVRM_PLL_MIPI_CPCON_SELECT_STEPS_N_DIVIDER
+};
+static const NvU32 s_PllMipiCpconSelectionTableSize =
+NV_ARRAY_SIZE(s_PllMipiCpconSelectionTable);
+
+static void
+PllLpGetTypicalControls(
+ NvRmFreqKHz InputKHz,
+ NvU32 M,
+ NvU32 N,
+ NvU32* pCpcon)
+{
+ NvU32 i;
+ if (N >= NVRM_PLL_LP_MIN_N_FOR_CPCON_SELECTION)
+ {
+ // CPCON depends on comparison frequency
+ for (i = 0; i < s_PllLpCpconSelectionTableSize; i++)
+ {
+ if (InputKHz >= s_PllLpCpconSelectionTable[i] * M)
+ break;
+ }
+ *pCpcon = i + 1;
+ }
+ else // CPCON is 1, regardless of frequency
+ {
+ *pCpcon = 1;
+ }
+}
+
+static void
+PllMipiGetTypicalControls(
+ NvU32 N,
+ NvU32* pCpcon,
+ NvU32* pLfCon)
+{
+ NvU32 i;
+
+ // CPCON depends on feedback divider
+ for (i = 0; i < s_PllMipiCpconSelectionTableSize; i++)
+ {
+ if (N <= s_PllMipiCpconSelectionTable[i])
+ break;
+ }
+ *pCpcon = i + 1;
+ *pLfCon = (N >= NVRM_PLL_MIPI_LFCON_SELECT_N_DIVIDER) ? 1 : 0;
+}
+
+void
+NvRmPrivAp15PllSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo,
+ NvU32 M,
+ NvU32 N,
+ NvU32 P,
+ NvU32 StableDelayUs,
+ NvU32 cpcon,
+ NvU32 lfcon,
+ NvBool TypicalControls,
+ NvU32 flags)
+{
+ NvU32 base, misc;
+ NvU32 old_base, old_misc;
+ NvU32 delay = 0;
+ NvU32 override = 0;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+ NV_ASSERT(pCinfo->PllBaseOffset);
+ NV_ASSERT(pCinfo->PllMiscOffset);
+
+ /*
+ * PLL control fields used below have the same layout for all PLLs with
+ * the following exceptions:
+ *
+ * a) PLLP base register OVERRIDE field has to be set in order to enable
+ * PLLP re-configuration in diagnostic mode. For other PLLs this field is
+ * "Don't care".
+ * b) PLLU HS P divider field is one bit, inverse logic field. Other control
+ * bits, that are mapped to P divider in common layout should be set to 0.
+ *
+ * PLLP h/w field definitions will be used in DRF macros to construct base
+ * values for all PLLs, with special care of a) and b). All base fields not
+ * explicitly used below are set to 0 for all PLLs.
+ *
+ * c) PLLD/PLLU miscellaneous register has a unique fields determined based
+ * on the input flags. For other PLLs these fields have different meaning,
+ * and will be preserved.
+ *
+ * PLLP h/w field definitions will be used in DRF macros to construct
+ * miscellaneous values with common layout. For unique fields PLLD h/w
+ * definitions will be used. All miscellaneous fields not explicitly used
+ * below are preserved for all PLLs.
+ */
+ base = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset);
+ old_base = base;
+
+ // Disable PLL if either input or feedback divider setting is zero
+ if ((M == 0) || (N == 0))
+ {
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, DISABLE, base);
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, DISABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+ NvRmPrivPllFreqUpdate(hRmDevice, pCinfo);
+ return;
+ }
+
+ // Determine type-specific controls, construct new misc settings
+ misc = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllMiscOffset);
+ old_misc = misc;
+ if (pCinfo->PllType == NvRmPllType_MIPI)
+ {
+ if (flags & NvRmPllConfigFlags_SlowMode)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // "1" = slow (/8) MIPI clock output
+ CLK_RST_CONTROLLER, PLLD_MISC, PLLD_FO_MODE, 1, misc);
+ }
+ else if (flags & NvRmPllConfigFlags_FastMode)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // "0" = fast MIPI clock output
+ CLK_RST_CONTROLLER, PLLD_MISC, PLLD_FO_MODE, 0, misc);
+ }
+ if (flags & NvRmPllConfigFlags_DiffClkEnable)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // Enable differential clocks
+ CLK_RST_CONTROLLER, PLLD_MISC, PLLD_CLKENABLE, 1, misc);
+ }
+ else if (flags & NvRmPllConfigFlags_DiffClkDisable)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // Disable differential clocks
+ CLK_RST_CONTROLLER, PLLD_MISC, PLLD_CLKENABLE, 0, misc);
+ }
+ if (TypicalControls)
+ {
+ PllMipiGetTypicalControls(N, &cpcon, &lfcon);
+ }
+ delay = NVRM_PLL_MIPI_STABLE_DELAY_US;
+ }
+ else if (pCinfo->PllType == NvRmPllType_LP)
+ {
+ if (flags & NvRmPllConfigFlags_DccEnable)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // "1" = enable DCC
+ CLK_RST_CONTROLLER, PLLP_MISC, PLLP_DCCON, 1, misc);
+ }
+ else if (flags & NvRmPllConfigFlags_DccDisable)
+ {
+ misc = NV_FLD_SET_DRF_NUM( // "0" = disable DCC
+ CLK_RST_CONTROLLER, PLLP_MISC, PLLP_DCCON, 0, misc);
+ }
+ if (TypicalControls)
+ {
+ NvRmFreqKHz InputKHz = NvRmPrivGetClockSourceFreq(pCinfo->InputId);
+ PllLpGetTypicalControls(InputKHz, M, N, &cpcon);
+ }
+ lfcon = 0; // always for LP PLL
+ delay = NVRM_PLL_LP_STABLE_DELAY_US;
+ }
+ else if (pCinfo->PllType == NvRmPllType_UHS)
+ {
+ if (TypicalControls) // Same as MIPI typical controls
+ {
+ PllMipiGetTypicalControls(N, &cpcon, &lfcon);
+ }
+ delay = NVRM_PLL_MIPI_STABLE_DELAY_US;
+ P = (P == 0) ? 1 : 0; // P-divider is 1 bit, inverse logic
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid PLL type");
+ }
+ misc = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, PLLP_MISC, PLLP_CPCON, cpcon, misc);
+ misc = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, PLLP_MISC, PLLP_LFCON, lfcon, misc);
+
+ // Construct new base setting
+ // Override is PLLP specific, and it is just ignored by other PLLs;
+ override = ((flags & NvRmPllConfigFlags_Override) != 0) ?
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_BASE_OVRRIDE_ENABLE :
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_BASE_OVRRIDE_DISABLE;
+ { // Compiler failed to generate correct code for the base fields
+ // concatenation without the split below
+ volatile NvU32 prebase =
+ NV_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, ENABLE) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, ENABLE) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_REF_DIS, REF_ENABLE);
+ base = prebase |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BASE_OVRRIDE, override) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVP, P) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVN, N) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVM, M);
+ }
+
+ // If PLL is not bypassed, and new configurations is the same as the old
+ // one - exit without overwriting h/w. Otherwise, bypass PLL before
+ // changing configuration.
+ if (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, old_base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_BYPASS_DISABLE)
+ {
+ old_base = NV_FLD_SET_DRF_DEF(
+ CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, ENABLE, old_base);
+ if ((base == old_base) && (misc == old_misc))
+ {
+ NvRmPrivPllFreqUpdate(hRmDevice, pCinfo);
+ return;
+ }
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->PllBaseOffset, old_base);
+ }
+
+ // Configure and enable PLL, keep it bypassed
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllMiscOffset, misc);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+
+ // Wait for PLL to stabilize and switch to PLL output
+ NV_ASSERT(StableDelayUs);
+ if (StableDelayUs > delay)
+ StableDelayUs = delay;
+ NvOsWaitUS(StableDelayUs);
+
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, DISABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+ NvRmPrivPllFreqUpdate(hRmDevice, pCinfo);
+}
+
+NvRmFreqKHz
+NvRmPrivAp15PllFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo)
+{
+ NvU32 M, N, P;
+ NvU32 base, misc;
+ NvRmFreqKHz PllKHz;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+ NV_ASSERT(pCinfo->PllBaseOffset);
+ NV_ASSERT(pCinfo->PllMiscOffset);
+
+ /*
+ * PLL control fields used below have the same layout for all PLLs with
+ * the following exceptions:
+ *
+ * a) PLLP base register OVERRIDE field ("Don't care" for other PLLs).
+ * Respectively, PLLP h/w field definitions will be used in DRF macros
+ * to construct base values for all PLLs.
+ *
+ * b) PLLD/PLLU miscellaneous register fast/slow mode control (does not
+ * affect output frequency for other PLLs). Respectively, PLLD h/w field
+ * definitions will be used in DRF macros to construct miscellaneous values.
+ */
+ base = NV_REGR(
+ hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset);
+ PllKHz = NvRmPrivGetClockSourceFreq(pCinfo->InputId);
+ NV_ASSERT(PllKHz);
+ NV_ASSERT(NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_REF_DIS, base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_REF_DIS_REF_ENABLE);
+
+ if (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_BYPASS_DISABLE)
+ {
+ // Special cases: PLL is disabled, or in fixed mode (PLLP only)
+ if (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_ENABLE_DISABLE)
+ return 0;
+ if ((pCinfo->SourceId == NvRmClockSource_PllP0) &&
+ (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BASE_OVRRIDE, base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_BASE_OVRRIDE_DISABLE))
+ return NV_BOOT_PLLP_FIXED_FREQ_KHZ;
+
+ // PLL formula - Output F = (Reference F * N) / (M * 2^P)
+ M = NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVM, base);
+ N = NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVN, base);
+ P = NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_DIVP, base);
+ NV_ASSERT((M != 0) && (N != 0));
+
+ if (pCinfo->PllType == NvRmPllType_UHS)
+ {
+ // Adjust P divider field size and inverse logic for USB HS PLL
+ P = (P & 0x1) ? 0 : 1;
+ }
+ PllKHz = ((PllKHz * N) / M) >> P;
+
+ // Check slow/fast mode selection for MIPI PLLs
+ if (pCinfo->PllType == NvRmPllType_MIPI)
+ {
+ misc = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->PllMiscOffset);
+ if (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLD_MISC, PLLD_FO_MODE, misc) == 1)
+ {
+ PllKHz = PllKHz >> 3; // In slow mode output is divided by 8
+ }
+ }
+ }
+ if (pCinfo->SourceId == NvRmClockSource_PllD0)
+ {
+ PllKHz = PllKHz >> 1; // PLLD output always divided by 2
+ }
+ return PllKHz;
+}
+
+static void
+Ap15PllControl(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource PllId,
+ NvBool Enable)
+{
+ NvU32 base;
+ NvU32 delay = 0;
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(PllId)->pInfo.pPll;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo->PllBaseOffset);
+
+ /*
+ * PLL control fields used below have the same layout for all PLLs.
+ * PLLP h/w field definitions will be used in DRF macros to construct base
+ * values for all PLLs.
+ */
+ base = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset);
+
+ if (Enable)
+ {
+ // No need to enable already enabled PLL - do nothing
+ if (NV_DRF_VAL(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, base) ==
+ CLK_RST_CONTROLLER_PLLP_BASE_0_PLLP_ENABLE_ENABLE)
+ return;
+
+ // Get ready stabilization delay
+ if ((pCinfo->PllType == NvRmPllType_MIPI) ||
+ (pCinfo->PllType == NvRmPllType_UHS))
+ delay = NVRM_PLL_MIPI_STABLE_DELAY_US;
+ else if (pCinfo->PllType == NvRmPllType_LP)
+ delay = NVRM_PLL_LP_STABLE_DELAY_US;
+ else
+ NV_ASSERT(!"Invalid PLL type");
+
+ // Bypass PLL => Enable PLL => wait for PLL to stabilize
+ // => switch to PLL output. All other settings preserved.
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, ENABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, ENABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+
+ NvOsWaitUS(delay);
+
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, DISABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+ }
+ else
+ {
+ // Disable PLL, no bypass. All other settings preserved.
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_BYPASS, DISABLE, base);
+ base = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, PLLP_BASE, PLLP_ENABLE, DISABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->PllBaseOffset, base);
+ }
+ NvRmPrivPllFreqUpdate(hRmDevice, pCinfo);
+}
+
+void
+NvRmPrivAp15PllConfigureSimple(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource PllId,
+ NvRmFreqKHz MaxOutKHz,
+ NvRmFreqKHz* pPllOutKHz)
+{
+#define NVRM_PLL_FCMP_1 (1000)
+#define NVRM_PLL_VCO_RANGE_1 (1000000)
+#define NVRM_PLL_FCMP_2 (2000)
+#define NVRM_PLL_VCO_RANGE_2 (2000000)
+
+ /*
+ * Simple PLL configuration (assuming that target output frequency is
+ * always in VCO range, and does not exceed 2GHz).
+ * - output divider is set 1:1
+ * - input divider is set to get comparison frequency equal or slightly
+ * above 1MHz if VCO is below 1GHz . Otherwise, input divider is set
+ * to get comparison frequency equal or slightly below 2MHz.
+ * - feedback divider is calculated based on target output frequency
+ * With simple configuration the absolute output frequency error does not
+ * exceed half of comparison frequency. It has been verified that simple
+ * configuration provides necessary accuracy for all display pixel clocks
+ * use cases.
+ */
+ NvU32 M, N, P;
+ NvRmFreqKHz RefKHz, VcoKHz;
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(PllId)->pInfo.pPll;
+ NvU32 flags = 0;
+
+ NV_ASSERT(hRmDevice);
+ VcoKHz = *pPllOutKHz;
+ P = 0;
+
+ if (pCinfo->SourceId == NvRmClockSource_PllD0)
+ { // PLLD output is always divided by 2 (after P-divider)
+ VcoKHz = VcoKHz << 1;
+ MaxOutKHz = MaxOutKHz << 1;
+ flags = NvRmPllConfigFlags_DiffClkEnable;
+ }
+ if (pCinfo->SourceId == NvRmClockSource_PllX0)
+ {
+ flags = VcoKHz < NVRM_PLLX_DCC_VCO_MIN ?
+ NvRmPllConfigFlags_DccDisable : NvRmPllConfigFlags_DccEnable;
+ }
+ NV_ASSERT((pCinfo->PllVcoMin <= VcoKHz) && (VcoKHz <= pCinfo->PllVcoMax));
+ NV_ASSERT(VcoKHz <= NVRM_PLL_VCO_RANGE_2);
+ NV_ASSERT(VcoKHz <= MaxOutKHz);
+
+ RefKHz = NvRmPrivGetClockSourceFreq(pCinfo->InputId);
+ NV_ASSERT(RefKHz);
+ if (VcoKHz <= NVRM_PLL_VCO_RANGE_1)
+ M = RefKHz / NVRM_PLL_FCMP_1;
+ else
+ M = (RefKHz + NVRM_PLL_FCMP_2 - 1) / NVRM_PLL_FCMP_2;
+ N = (RefKHz + ((VcoKHz * M) << 1) ) / (RefKHz << 1);
+ if ((RefKHz * N) > (MaxOutKHz * M))
+ N--; // use floor if rounding violates client's max limit
+
+ NvRmPrivAp15PllSet(
+ hRmDevice, pCinfo, M, N, P, (NvU32)-1, 0, 0, NV_TRUE, flags);
+ *pPllOutKHz = NvRmPrivGetClockSourceFreq(pCinfo->SourceId);
+}
+
+/*****************************************************************************/
+
+// Fixed list of PLLP configurations for different reference frequencies
+// arranged according to CLK_RST_CONTROLLER_OSC_CTRL_0_OSC_FREQ_FIELD enum
+static const NvRmPllFixedConfig s_Ap15PllPConfigurations[] =
+{
+ NVRM_PLLP_AT_13MHZ,
+ NVRM_PLLP_AT_19MHZ,
+ NVRM_PLLP_AT_12MHZ,
+ NVRM_PLLP_AT_26MHZ
+};
+
+static void
+Ap15PllPConfigure(NvRmDeviceHandle hRmDevice)
+{
+ NvU32 reg;
+ NvRmFreqKHz PllKHz;
+ NvRmPllFixedConfig PllPConfig = {0};
+
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP0)->pInfo.pPll;
+ NV_ASSERT(hRmDevice);
+
+ // Configure and enable PllP at RM fixed frequency,
+ // if it is not already enabled
+ PllKHz = NvRmPrivGetClockSourceFreq(pCinfo->SourceId);
+ if (PllKHz == NVRM_PLLP_FIXED_FREQ_KHZ)
+ return;
+
+ // Get fixed PLLP configuration for current oscillator frequency.
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_OSC_CTRL_0);
+ PllPConfig = s_Ap15PllPConfigurations[NV_DRF_VAL(
+ CLK_RST_CONTROLLER, OSC_CTRL, OSC_FREQ, reg)];
+
+ // Configure and enable PLLP
+ NvRmPrivAp15PllSet(hRmDevice, pCinfo, PllPConfig.M, PllPConfig.N,
+ PllPConfig.P, (NvU32)-1, 0, 0, NV_TRUE,
+ NvRmPllConfigFlags_Override);
+}
+
+/*****************************************************************************/
+
+// Fixed list of PLLU configurations for different reference frequencies
+// arranged according to CLK_RST_CONTROLLER_OSC_CTRL_0_OSC_FREQ_FIELD enum
+static const NvRmPllFixedConfig s_Ap15UsbPllConfigurations[] =
+{
+ NVRM_PLLU_AT_13MHZ,
+ NVRM_PLLU_AT_19MHZ,
+ NVRM_PLLU_AT_12MHZ,
+ NVRM_PLLU_AT_26MHZ
+};
+
+static const NvRmPllFixedConfig s_Ap15UlpiPllConfigurations[] =
+{
+ NVRM_PLLU_ULPI_AT_13MHZ,
+ NVRM_PLLU_ULPI_AT_19MHZ,
+ NVRM_PLLU_ULPI_AT_12MHZ,
+ NVRM_PLLU_ULPI_AT_26MHZ
+};
+
+static const NvRmPllFixedConfig s_Ap15UhsPllConfigurations[] =
+{
+ NVRM_PLLU_HS_AT_13MHZ,
+ NVRM_PLLU_HS_AT_19MHZ,
+ NVRM_PLLU_HS_AT_12MHZ,
+ NVRM_PLLU_HS_AT_26MHZ
+};
+
+static void
+PllUmipiConfigure(NvRmDeviceHandle hRmDevice, NvRmFreqKHz TargetFreq)
+{
+ NvU32 reg;
+ NvRmPllFixedConfig UsbConfig = {0};
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllU0)->pInfo.pPll;
+ NvRmFreqKHz CurrentFreq = NvRmPrivGetClockSourceFreq(pCinfo->SourceId);
+ NV_ASSERT(hRmDevice);
+
+ if (CurrentFreq == TargetFreq)
+ return; // PLLU is already configured at target frequency - exit
+
+ // Index into fixed PLLU configuration tables based on oscillator frequency
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_OSC_CTRL_0);
+ reg = NV_DRF_VAL(CLK_RST_CONTROLLER, OSC_CTRL, OSC_FREQ, reg);
+
+ if (TargetFreq == NvRmFreqUnspecified)
+ {
+ // By default set standard USB frequency, if PLLU is not configured
+ if ((CurrentFreq == s_Ap15UsbPllConfigurations[reg].OutputKHz) ||
+ (CurrentFreq == s_Ap15UlpiPllConfigurations[reg].OutputKHz))
+ {
+ return; // PLLU is already configured at supported frequency - exit
+ }
+ UsbConfig = s_Ap15UsbPllConfigurations[reg];
+ }
+ else if (TargetFreq == s_Ap15UsbPllConfigurations[reg].OutputKHz)
+ {
+ UsbConfig = s_Ap15UsbPllConfigurations[reg];
+ }
+ else if (TargetFreq == s_Ap15UlpiPllConfigurations[reg].OutputKHz)
+ {
+ UsbConfig = s_Ap15UlpiPllConfigurations[reg];
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid target frequency");
+ return;
+ }
+ // Configure and enable PLLU
+ NvRmPrivAp15PllSet(hRmDevice, pCinfo, UsbConfig.M, UsbConfig.N,
+ UsbConfig.P, (NvU32)-1, 0, 0, NV_TRUE, 0);
+}
+
+static void
+PllUhsConfigure(NvRmDeviceHandle hRmDevice, NvRmFreqKHz TargetFreq)
+{
+ NvU32 reg;
+ NvRmPllFixedConfig UsbConfig = {0};
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllU0)->pInfo.pPll;
+ NvRmFreqKHz CurrentFreq = NvRmPrivGetClockSourceFreq(pCinfo->SourceId);
+ NV_ASSERT(hRmDevice);
+
+ // Index into fixed PLLU configuration tables based on oscillator frequency
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_OSC_CTRL_0);
+ reg = NV_DRF_VAL(CLK_RST_CONTROLLER, OSC_CTRL, OSC_FREQ, reg);
+
+ // If PLLU is already configured - exit
+ if (CurrentFreq == s_Ap15UhsPllConfigurations[reg].OutputKHz)
+ return;
+
+ /*
+ * Target may be unspecified, or any of the standard USB, ULPI, or UHS
+ * frequencies. In any case, main PLLU HS output is configured at UHS
+ * frequency, with ULPI and USB frequencies are generated on secondary
+ * outputs by fixed post dividers
+ */
+ if (!( (TargetFreq == NvRmFreqUnspecified) ||
+ (TargetFreq == s_Ap15UsbPllConfigurations[reg].OutputKHz) ||
+ (TargetFreq == s_Ap15UlpiPllConfigurations[reg].OutputKHz) ||
+ (TargetFreq == s_Ap15UhsPllConfigurations[reg].OutputKHz) )
+ )
+ {
+ NV_ASSERT(!"Invalid target frequency");
+ return;
+ }
+ // Configure and enable PLLU
+ UsbConfig = s_Ap15UhsPllConfigurations[reg];
+ NvRmPrivAp15PllSet(hRmDevice, pCinfo, UsbConfig.M, UsbConfig.N,
+ UsbConfig.P, (NvU32)-1, 0, 0, NV_TRUE, 0);
+}
+
+static void
+Ap15PllUConfigure(NvRmDeviceHandle hRmDevice, NvRmFreqKHz TargetFreq)
+{
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllU0)->pInfo.pPll;
+
+ if (pCinfo->PllType == NvRmPllType_MIPI)
+ PllUmipiConfigure(hRmDevice, TargetFreq);
+ else if (pCinfo->PllType == NvRmPllType_UHS)
+ PllUhsConfigure(hRmDevice, TargetFreq);
+}
+
+/*****************************************************************************/
+
+// Fixed list of PLLA configurations for supported audio clocks
+static const NvRmPllFixedConfig s_Ap15AudioPllConfigurations[] =
+{
+ NVRM_PLLA_CONFIGURATIONS
+};
+
+static void
+Ap15PllAConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pAudioTargetKHz)
+{
+// The remainder bits used to check divisibility
+#define REMINDER_BITS (6)
+
+ NvU32 i, rem;
+ NvRmFreqKHz OutputKHz;
+ NvU32 BestRem = (0x1 << REMINDER_BITS);
+ NvU32 BestIndex = NV_ARRAY_SIZE(s_Ap15AudioPllConfigurations) - 1;
+
+ NvRmPllFixedConfig AudioConfig = {0};
+ const NvRmPllClockInfo* pPllCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllA1)->pInfo.pPll;
+ const NvRmDividerClockInfo* pDividerCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllA0)->pInfo.pDivider;
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(*pAudioTargetKHz);
+
+ // Fixed PLLA FPGA configuration
+ if (NvRmPrivGetExecPlatform(hRmDevice) == ExecPlatform_Fpga)
+ {
+ *pAudioTargetKHz = NvRmPrivGetClockSourceFreq(pDividerCinfo->SourceId);
+ return;
+ }
+ // Find PLLA configuration with smallest output frequency that can be
+ // divided by fractional divider into the closest one to the target.
+ for (i = 0; i < NV_ARRAY_SIZE(s_Ap15AudioPllConfigurations); i++)
+ {
+ OutputKHz = s_Ap15AudioPllConfigurations[i].OutputKHz;
+ if (*pAudioTargetKHz > OutputKHz)
+ continue;
+ rem = ((OutputKHz << (REMINDER_BITS + 1)) / (*pAudioTargetKHz)) &
+ ((0x1 << REMINDER_BITS) - 1);
+ if (rem < BestRem)
+ {
+ BestRem = rem;
+ BestIndex = i;
+ if (rem == 0)
+ break;
+ }
+ }
+
+ // Configure PLLA and output divider
+ AudioConfig = s_Ap15AudioPllConfigurations[BestIndex];
+ NvRmPrivAp15PllSet(hRmDevice, pPllCinfo, AudioConfig.M, AudioConfig.N,
+ AudioConfig.P, (NvU32)-1, 0, 0, NV_TRUE, 0);
+ NvRmPrivDividerSet(
+ hRmDevice, pDividerCinfo, AudioConfig.D);
+ *pAudioTargetKHz = NvRmPrivGetClockSourceFreq(pDividerCinfo->SourceId);
+}
+
+static void
+Ap15PllAControl(
+ NvRmDeviceHandle hRmDevice,
+ NvBool Enable)
+{
+ const NvRmDividerClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllA0)->pInfo.pDivider;
+ if (NvRmPrivGetExecPlatform(hRmDevice) == ExecPlatform_Fpga)
+ return; // No PLLA control on FPGA
+
+ if (Enable)
+ {
+ Ap15PllControl(hRmDevice, NvRmClockSource_PllA1, NV_TRUE);
+ }
+ else
+ {
+ // Disable provided PLLA is not used as a source for any clock
+ if (NvRmPrivGetDfsFlags(hRmDevice) & NvRmDfsStatusFlags_StopPllA0)
+ Ap15PllControl(hRmDevice, NvRmClockSource_PllA1, NV_FALSE);
+ }
+ NvRmPrivDividerFreqUpdate(hRmDevice, pCinfo);
+}
+
/*
 * Initializes the audio sync clocks: configures PLLA to the requested
 * audio sync frequency and routes PLLA (PllA0) to the audio sync and
 * MPE audio selectors (when present on this SoC), with doublers disabled.
 *
 * AudioSyncKHz must exactly match one of the fixed audio frequencies in
 * s_Ap15AudioPllConfigurations (asserted below).
 */
static void
Ap15AudioSyncInit(NvRmDeviceHandle hRmDevice, NvRmFreqKHz AudioSyncKHz)
{
    NvRmFreqKHz AudioTargetKHz;
    NvRmClockSource AudioSyncSource;
    const NvRmSelectorClockInfo* pCinfo;
    NV_ASSERT(hRmDevice);

    // Configure PLLA. Requested frequency must always exactly match one of
    // the fixed audio frequencies.
    AudioTargetKHz = AudioSyncKHz;
    Ap15PllAConfigure(hRmDevice, &AudioTargetKHz);
    NV_ASSERT(AudioTargetKHz == AudioSyncKHz);

    // Use PLLA as audio sync source, and disable doublers.
    // (verify if SoC supports audio sync selectors)
    AudioSyncSource = NvRmClockSource_PllA0;
    if (NvRmPrivGetClockSourceHandle(NvRmClockSource_AudioSync))
    {
        pCinfo = NvRmPrivGetClockSourceHandle(
            NvRmClockSource_AudioSync)->pInfo.pSelector;
        NvRmPrivSelectorClockSet(hRmDevice, pCinfo, AudioSyncSource, NV_FALSE);
    }
    if (NvRmPrivGetClockSourceHandle(NvRmClockSource_MpeAudio))
    {
        pCinfo = NvRmPrivGetClockSourceHandle(
            NvRmClockSource_MpeAudio)->pInfo.pSelector;
        NvRmPrivSelectorClockSet(hRmDevice, pCinfo, AudioSyncSource, NV_FALSE);
    }
}
+
+/*****************************************************************************/
+
/*
 * Enables or disables PLLD and updates the DSI clock state accordingly.
 *
 * On disable, PLLD is kept running if it is still selected as a source by
 * either display head, or if the DSI module clock enable bit is set in the
 * clock-and-reset register.
 */
static void
Ap15PllDControl(
    NvRmDeviceHandle hRmDevice,
    NvBool Enable)
{
    NvU32 reg;
    NvRmModuleClockInfo* pCinfo = NULL;
    NvRmModuleClockState* pCstate = NULL;
    NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
        hRmDevice, NvRmModuleID_Dsi, &pCinfo, &pCstate));

    if (Enable)
    {
        Ap15PllControl(hRmDevice, NvRmClockSource_PllD0, NV_TRUE);
        pCstate->actual_freq =
            NvRmPrivGetClockSourceFreq(NvRmClockSource_PllD0);
        return;
    }

    // Disable PLLD if it is not used by either display head or DSI
    reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
        pCinfo->ClkEnableOffset);
    if (NvRmPrivIsSourceSelectedByModule(hRmDevice, NvRmClockSource_PllD0,
            NVRM_MODULE_ID(NvRmModuleID_Display, 0)) ||
        NvRmPrivIsSourceSelectedByModule(hRmDevice, NvRmClockSource_PllD0,
            NVRM_MODULE_ID(NvRmModuleID_Display, 1)) ||
        ((reg & pCinfo->ClkEnableField) == pCinfo->ClkEnableField))
        return;     // still in use - leave PLLD running

    Ap15PllControl(hRmDevice, NvRmClockSource_PllD0, NV_FALSE);
    pCstate->actual_freq =
        NvRmPrivGetClockSourceFreq(NvRmClockSource_PllD0);
}
+
// Fixed list of PLL HDMI configurations for different reference frequencies
// arranged according to CLK_RST_CONTROLLER_OSC_CTRL_0_OSC_FREQ_FIELD enum
// (indexed directly by the OSC_FREQ field value read from hardware)
static const NvRmPllFixedConfig s_Ap15HdmiPllConfigurations[] =
{
    NVRM_PLLHD_AT_13MHZ,
    NVRM_PLLHD_AT_19MHZ,
    NVRM_PLLHD_AT_12MHZ,
    NVRM_PLLHD_AT_26MHZ
};
+
+static void
+Ap15PllDConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz TargetFreq)
+{
+ NvRmFreqKHz MaxFreq = NvRmPrivGetSocClockLimits(NvRmModuleID_Dsi)->MaxKHz;
+ NvRmModuleClockInfo* pCinfo = NULL;
+ NvRmModuleClockState* pCstate = NULL;
+ NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
+ hRmDevice, NvRmModuleID_Dsi, &pCinfo, &pCstate));
+
+ /*
+ * PLLD is adjusted when DDK/ODM is initializing DSI or reconfiguring
+ * display clock (for HDMI, DSI, or in some cases CRT).
+ */
+ if ((TargetFreq == NVRM_HDMI_720p_1080i_FIXED_FREQ_KHZ) ||
+ (TargetFreq == NVRM_HDMI_720p_1080p_FIXED_FREQ_KHZ) ||
+ (TargetFreq == NVRM_HDMI_480_FIXED_FREQ_KHZ))
+ {
+ // 480p or 720p or 1080i/1080p HDMI - use fixed PLLD configuration
+ NvU32 reg;
+ NvRmPllFixedConfig HdmiConfig = {0};
+ const NvRmPllClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_PllD0)->pInfo.pPll;
+
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_OSC_CTRL_0);
+ HdmiConfig = s_Ap15HdmiPllConfigurations[NV_DRF_VAL(
+ CLK_RST_CONTROLLER, OSC_CTRL, OSC_FREQ, reg)];
+
+ NvRmPrivAp15PllSet(hRmDevice, pCinfo, HdmiConfig.M, HdmiConfig.N,
+ HdmiConfig.P, (NvU32)-1, 0, 0, NV_TRUE, 0);
+ }
+ else
+ {
+ // for other targets use simple variable configuration
+ if (TargetFreq < NVRM_PLLD_DISPLAY_MIN_KHZ)
+ {
+ NV_ASSERT((TargetFreq * NVRM_DISPLAY_DIVIDER_MAX) >=
+ NVRM_PLLD_DISPLAY_MIN_KHZ);
+ TargetFreq =
+ ((NVRM_PLLD_DISPLAY_MIN_KHZ / TargetFreq) + 1) * TargetFreq;
+ }
+ NV_ASSERT(TargetFreq <= MaxFreq);
+ NvRmPrivAp15PllConfigureSimple(
+ hRmDevice, NvRmClockSource_PllD0, MaxFreq, &TargetFreq);
+ }
+
+ // Update DSI clock state (PLLD is a single source, no divider)
+ pCstate->SourceClock = 0;
+ pCstate->Divider = 1;
+ pCstate->actual_freq =
+ NvRmPrivGetClockSourceFreq(NvRmClockSource_PllD0);
+ NvRmPrivModuleVscaleReAttach(hRmDevice,
+ pCinfo, pCstate, pCstate->actual_freq, pCstate->actual_freq);
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
/*
 * Selects and configures a clock source for a display head and fills in the
 * module clock state. AP15 display has its own internal divider, so the
 * input request is for pixel clock but the output *pCstate describes the
 * SOURCE frequency (Divider is left at 1); the display driver divides.
 * Optionally configures/disables the TVDAC subclock per flags.
 */
static void
Ap15DisplayClockConfigure(
    NvRmDeviceHandle hRmDevice,
    NvRmModuleClockInfo *pCinfo,
    NvRmFreqKHz MinFreq,
    NvRmFreqKHz MaxFreq,
    NvRmFreqKHz TargetFreq,
    NvRmModuleClockState* pCstate,
    NvU32 flags)
{
    NvU32 i;
    NvRmClockSource SourceId;
    NvRmFreqKHz PixelFreq = TargetFreq;
    NvRmFreqKHz SourceClockFreq = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);

    /*
     * Display clock source selection policy:
     * - if MIPI flag is specified - use PLLD, and reconfigure it as necessary
     * - else if Oscillator output provides required accuracy - use Oscillator
     * - else if PLLP fixed output provides required accuracy - use fixed PLLP
     * - else if PLLC is used by the other head - use PLLD, and reconfigure it
     *   as necessary
     * - else - use PLLC, and reconfigure it as necessary
     */
    if (flags & NvRmClockConfig_MipiSync)
    {
        // PLLD requested
        SourceId = NvRmClockSource_PllD0;
        Ap15PllDConfigure(hRmDevice, TargetFreq);
    }
    else if (NvRmIsFreqRangeReachable(
        SourceClockFreq, MinFreq, MaxFreq, NVRM_DISPLAY_DIVIDER_MAX))
    {
        // Target frequency is reachable from Oscillator - nothing to do
        SourceId = NvRmClockSource_ClkM;
    }
    else if (NvRmIsFreqRangeReachable(NVRM_PLLP_FIXED_FREQ_KHZ,
        MinFreq, MaxFreq, NVRM_DISPLAY_DIVIDER_MAX))
    {
        // Target frequency is reachable from PLLP0 - make sure it is enabled
        SourceId = NvRmClockSource_PllP0;
        Ap15PllPConfigure(hRmDevice);
    }
    else if (NvRmPrivIsSourceSelectedByModule(hRmDevice, NvRmClockSource_PllC0,
        NVRM_MODULE_ID(pCinfo->Module, (1 - pCinfo->Instance))))
    {
        // PLLC is used by the other head - only PLLD left
        SourceId = NvRmClockSource_PllD0;
        Ap15PllDConfigure(hRmDevice, TargetFreq);
    }
    else
    {
        // PLLC is available - use it
        SourceId = NvRmClockSource_PllC0;
        TargetFreq = NvRmPrivGetMaxFreqPllC(hRmDevice); // Target PLLC max
        if (!NvRmIsFreqRangeReachable(
            TargetFreq, MinFreq, MaxFreq, NVRM_DISPLAY_DIVIDER_MAX))
        {
            TargetFreq = MaxFreq; // Target pixel range max
        }
        NvRmPrivReConfigurePllC(hRmDevice, TargetFreq);
    }

    // Fill in clock state: locate the selected source in the module's mux
    for (i = 0; i < NvRmClockSource_Num; i++)
    {
        if (pCinfo->Sources[i] == SourceId)
            break;
    }
    NV_ASSERT(i < NvRmClockSource_Num);
    pCstate->SourceClock = i;   // source index
    pCstate->Divider = 1;       // no divider (display driver has its own)
    pCstate->actual_freq = NvRmPrivGetClockSourceFreq(SourceId); // source KHz
    NV_ASSERT(NvRmIsFreqRangeReachable(
        pCstate->actual_freq, MinFreq, MaxFreq, NVRM_DISPLAY_DIVIDER_MAX));

    if (flags & NvRmClockConfig_SubConfig)
    {
        NvRmModuleClockInfo* pTvDacInfo = NULL;
        NvRmModuleClockState* pTvDacState = NULL;
        NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
            hRmDevice, NvRmModuleID_Tvo, &pTvDacInfo, &pTvDacState));

        // TVDAC is the 2nd TVO subclock (CVE is the 1st one)
        pTvDacInfo += 2;
        pTvDacState += 2;
        NV_ASSERT(pTvDacInfo->Module == NvRmModuleID_Tvo);
        NV_ASSERT(pTvDacInfo->SubClockId == 2);

        // enable the tvdac clock (helper selected by chip ID)
        if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
            Ap15EnableTvDacClock(hRmDevice, ModuleClockState_Enable);
        else if (hRmDevice->ChipId.Id == 0x20)
            Ap20EnableTvDacClock(hRmDevice, ModuleClockState_Enable);

        // Set TVDAC = pixel clock (same source index and calculate divider
        // exactly as dc_hal.c does)
        pTvDacState->SourceClock = i;
        pTvDacState->Divider =
            (((pCstate->actual_freq * 2 ) + PixelFreq / 2) / PixelFreq) - 2;
        pTvDacState->actual_freq =
            (pCstate->actual_freq * 2 ) / (pTvDacState->Divider + 2);
        NvRmPrivModuleClockSet(hRmDevice, pTvDacInfo, pTvDacState);
    }
    if (flags & NvRmClockConfig_DisableTvDAC)
    {
        // disable the tvdac clock
        if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
            Ap15EnableTvDacClock(hRmDevice, ModuleClockState_Disable);
        else if (hRmDevice->ChipId.Id == 0x20)
            Ap20EnableTvDacClock(hRmDevice, ModuleClockState_Disable);
    }
}
+
/*
 * Handles module clock configuration exceptions - modules whose clocks
 * cannot be configured by the generic algorithm (display, DSI, HDMI from
 * PLLD, audio from PLLA, USB from PLLU).
 *
 * Returns NV_TRUE if configuration was completed here (pCstate filled in),
 * NV_FALSE if the caller should continue with regular configuration.
 */
NvBool
NvRmPrivAp15IsModuleClockException(
    NvRmDeviceHandle hRmDevice,
    NvRmModuleClockInfo *pCinfo,
    NvU32 ClockSourceCount,
    NvRmFreqKHz MinFreq,
    NvRmFreqKHz MaxFreq,
    const NvRmFreqKHz* PrefFreqList,
    NvU32 PrefCount,
    NvRmModuleClockState* pCstate,
    NvU32 flags)
{
    NV_ASSERT(hRmDevice);
    NV_ASSERT(pCinfo && PrefFreqList && pCstate);

    switch (pCinfo->Module)
    {
        case NvRmModuleID_Display:
            /*
             * Special handling for display clocks. Must satisfy requirements
             * for the 1st requested frequency and complete configuration.
             * Note that AP15 display divider is within module itself, so the
             * input request is for pixel clock, but output *pCstate specifies
             * source frequency. Display driver will configure divider.
             */
            Ap15DisplayClockConfigure(hRmDevice, pCinfo,
                MinFreq, MaxFreq, PrefFreqList[0], pCstate, flags);
            return NV_TRUE;

        case NvRmModuleID_Dsi:
            /*
             * Reconfigure PLLD to match requested frequency, and update DSI
             * clock state.
             */
            Ap15PllDConfigure(hRmDevice, PrefFreqList[0]);
            NV_ASSERT((MinFreq <= pCstate->actual_freq) &&
                      (pCstate->actual_freq <= MaxFreq));
            return NV_TRUE;

        case NvRmModuleID_Hdmi:
            /*
             * Complete configuration from PLLD if requested (PLLD should be
             * already configured properly for display)
             */
            if (flags & NvRmClockConfig_MipiSync)
            {
                NvRmFreqKHz FreqKHz =
                    NvRmPrivGetClockSourceFreq(NvRmClockSource_PllD0);
                pCstate->SourceClock = 1;   // PLLD source index
                // Fractional divider setting rounded to the nearest value:
                // actual = 2 * source / (divider + 2)
                pCstate->Divider = ((FreqKHz << 2) + PrefFreqList[0]) /
                                   (PrefFreqList[0] << 1) - 2;
                pCstate->actual_freq = (FreqKHz << 1) / (pCstate->Divider + 2);
                NV_ASSERT(pCinfo->Sources[pCstate->SourceClock] ==
                          NvRmClockSource_PllD0);
                NV_ASSERT(pCstate->Divider <= pCinfo->DivisorFieldMask);
                NV_ASSERT((MinFreq <= pCstate->actual_freq) &&
                          (pCstate->actual_freq <= MaxFreq));
                return NV_TRUE;
            }
            return NV_FALSE;

        case NvRmModuleID_Spdif:
            if (flags & NvRmClockConfig_SubConfig)
                return NV_FALSE; // Nothing special for SPDIFIN
            // fall through for SPDIFOUT
        case NvRmModuleID_I2s:
            /*
             * If requested, reconfigure PLLA to match target frequency, and
             * complete clock configuration with PLLA as a source. Otherwise,
             * make sure PLLA is enabled (at current configuration), and
             * continue regular configuration for SPDIFOUT and I2S.
             */
            if (flags & NvRmClockConfig_AudioAdjust)
            {
                NvRmFreqKHz FreqKHz = PrefFreqList[0];
                Ap15PllAConfigure(hRmDevice, &FreqKHz);

                pCstate->SourceClock = 0;   // PLLA source index
                pCstate->Divider = ((FreqKHz << 2) + PrefFreqList[0]) /
                                   (PrefFreqList[0] << 1) - 2;
                pCstate->actual_freq = (FreqKHz << 1) / (pCstate->Divider + 2);
                if (NvRmPrivGetExecPlatform(hRmDevice) == ExecPlatform_Fpga)
                {   // Fake return on FPGA (PLLA is not configurable, anyway)
                    pCstate->actual_freq = PrefFreqList[0];
                }
                NV_ASSERT(pCinfo->Sources[pCstate->SourceClock] ==
                          NvRmClockSource_PllA0);
                NV_ASSERT(pCstate->Divider <= pCinfo->DivisorFieldMask);
                NV_ASSERT((MinFreq <= pCstate->actual_freq) &&
                          (pCstate->actual_freq <= MaxFreq));
                return NV_TRUE;
            }
            Ap15PllAControl(hRmDevice, NV_TRUE);
            return NV_FALSE;

        case NvRmModuleID_Usb2Otg:
            /*
             * Reconfigure PLLU to match requested frequency, and complete USB
             * clock configuration (PLLU is a single source, no divider)
             */
            Ap15PllUConfigure(hRmDevice, PrefFreqList[0]);
            pCstate->SourceClock = 0;
            pCstate->Divider = 1;
            pCstate->actual_freq = PrefFreqList[0];
            return NV_TRUE;

        default:
            // No exception for other modules - continue regular configuration
            return NV_FALSE;
    }
}
+
+/*****************************************************************************/
+
+void
+NvRmPrivAp15DisablePLLs(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ const NvRmModuleClockState* pCstate)
+{
+#if !NV_OAL
+ switch (pCinfo->Module)
+ {
+ case NvRmModuleID_Display:
+ NvRmPrivBoostPllC(hRmDevice);
+ Ap15PllDControl(hRmDevice, NV_FALSE);
+ break;
+
+ case NvRmModuleID_Spdif:
+ case NvRmModuleID_I2s:
+ Ap15PllAControl(hRmDevice, NV_FALSE);
+ break;
+
+ default:
+ break;
+ }
+#endif
+}
+
+void
+NvRmPrivAp15PllDPowerControl(
+ NvRmDeviceHandle hRmDevice,
+ NvBool ConfigEntry,
+ NvBool* pMipiPllVddOn)
+{
+#if !NV_OAL
+ if (ConfigEntry)
+ {
+ // On entry to display clock configuration get PLLD power ready
+ if (!(*pMipiPllVddOn))
+ {
+ NvRmPrivPmuRailControl(hRmDevice, NV_VDD_PLLD_ODM_ID, NV_TRUE);
+ *pMipiPllVddOn = NV_TRUE;
+ }
+ }
+ else
+ {
+ // On exit from display clock configuration turn off PLLD power
+ // if it is disabled
+ if ((*pMipiPllVddOn) &&
+ (NvRmPrivGetClockSourceFreq(NvRmClockSource_PllD0) <=
+ NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM)))
+ {
+ NvRmPrivPmuRailControl(hRmDevice, NV_VDD_PLLD_ODM_ID, NV_FALSE);
+ *pMipiPllVddOn = NV_FALSE;
+ }
+ }
+#endif
+}
+
/*
 * Enables/disables the PLL source backing the given module's clock when the
 * module clock itself is being turned on/off.
 *
 * ModuleId is a composite module id; only the module field is examined.
 * Note: PLLU is never disabled once enabled (USB requirement).
 */
void
NvRmPrivConfigureClockSource(
    NvRmDeviceHandle hRmDevice,
    NvRmModuleID ModuleId,
    NvBool enable)
{
    // Extract module and instance from composite module id.
    NvU32 Module = NVRM_MODULE_ID_MODULE( ModuleId );

    switch (Module)
    {
        case NvRmModuleID_Usb2Otg:
            // Do not disable the PLLU clock once it is enabled
            // Set PLLU default configuration if it is not already configured
            if (enable)
                Ap15PllUConfigure(hRmDevice, NvRmFreqUnspecified);
            break;
#if !NV_OAL
        case NvRmModuleID_Spdif:
        case NvRmModuleID_I2s:
            if (enable)
            {
                // Do not enable if PLLA is not used as a source for any clock
                if (NvRmPrivGetDfsFlags(hRmDevice) & NvRmDfsStatusFlags_StopPllA0)
                    break;
            }
            // fall through (Spdif/I2s share PLLA control with Mpe; on
            // disable the guard above is skipped and PLLA control decides)
        case NvRmModuleID_Mpe:
            Ap15PllAControl(hRmDevice, enable);
            break;

        case NvRmModuleID_Dsi:
            Ap15PllDControl(hRmDevice, enable);
            break;

        case NvRmPrivModuleID_Pcie:
            NvRmPrivAp20PllEControl(hRmDevice, enable);
            break;
#endif
        default:
            break;
    }
    return;
}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/*
+ * Basic DFS clock control policy outline:
+ * - Oscillator ClkM, doubler ClkD, and memory PLLM0 - always available, fixed
+ * frequency sources.
+ * - Peripheral PLLP0 may be dynamically enabled / disabled when DFS is stopped
+ * and CPU is power gated. Hence, when DFS is running it is always enabled
+ * and configured at fixed PLLP0 frequency.
+ * - Cpu PLLC0 may be dynamically enabled / disabled when DFS is stopped and
+ * CPU is power gated. Hence, when DFS is running it is always enabled. PLLC0
+ * is commonly configured at maximum CPU domain frequency. If necessary, it
+ * may be adjusted to provide required display pixel clock frequency.
+ * - System buses, and MC/EMC configuration, clock source multiplexes and
+ * dividers, as well as PLLP2, PLLP4 and PLLM1 dividers are under exclusive
+ * DFS control, and are not accessed by any other code except bootloader
+ * before RM is open.
+ */
+
// Limit frequencies ratio for Vpipe : System >= 1 : 2^(value - 1)
#define LIMIT_SYS_TO_VDE_RATIO (2)

// Limit frequencies ratio for AHB : System >= 1:2 and APB : System >= 1 : 4
#define LIMIT_SYS_TO_AHB_APB_RATIOS (1)

// PLLP2 must be used as a variable source for System clock.
// Each policy entry maps a target KHz to the fractional divider setting:
// output = (PLLP0 * 2) / (divisor + 2), rounded down to a reachable value.
#define PLLP_POLICY_ENTRY(KHz) \
    { NvRmClockSource_PllP2,\
      (NVRM_PLLP_FIXED_FREQ_KHZ * 2)/((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz),\
      ((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz - 2)\
    },
static const NvRmDfsSource s_Ap15PllPSystemClockPolicy[] =
{
    NVRM_AP15_PLLP_POLICY_SYSTEM_CLOCK
};
static const NvU32 s_Ap15PllPSystemClockPolicyEntries =
    NV_ARRAY_SIZE(s_Ap15PllPSystemClockPolicy);
#undef PLLP_POLICY_ENTRY
+
+
// PLLP4 must be used as a variable source for cpu clock.
// Same divider arithmetic as the system-clock policy table above, but the
// entries select the PLLP4 divided output.
#define PLLP_POLICY_ENTRY(KHz) \
    { NvRmClockSource_PllP4,\
      (NVRM_PLLP_FIXED_FREQ_KHZ * 2)/((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz),\
      ((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz - 2)\
    },
static const NvRmDfsSource s_Ap15PllPCpuClockPolicy[] =
{
    NVRM_AP15_PLLP_POLICY_CPU_CLOCK
};
static const NvU32 s_Ap15PllPCpuClockPolicyEntries =
    NV_ARRAY_SIZE(s_Ap15PllPCpuClockPolicy);
#undef PLLP_POLICY_ENTRY
+
/*
 * Sorted list of timing parameters for discrete set of EMC frequencies used
 * by DFS: entry 0 specifies timing parameters for PLLM0 output frequency,
 * entry n (n = 1, 2, ... number of EMC steps-1) specifies timing parameters
 * for EMC frequency = PLLM0 frequency / (2 * n); thus only frequencies evenly
 * divided down from PLLM0 will be used by DFS
 */
static NvRmAp15EmcTimingConfig
s_Ap15EmcConfigSortedTable[NVRM_AP15_DFS_EMC_FREQ_STEPS];

// Cached EMC/MC clock descriptors and the currently selected table entry;
// populated by NvRmPrivAp15EmcConfigInit()
static struct MemClocksRec
{
    // Index of selected EMC configuration entry
    NvU32 Index;

    // Pointers to EMC and MC clock descriptors
    NvRmModuleClockInfo* pEmcInfo;
    NvRmModuleClockInfo* pMcInfo;

    // Pointers to EMC and MC clock state records
    NvRmModuleClockState* pEmcState;
    NvRmModuleClockState* pMcState;

} s_MemClocks = {0};

// CPU clock cap policy, in 1/256 units of the CPU maximum, indexed by
// k = PLLM0 / (2 * EMC2xKHz)
static const NvU32 s_Cpu2EmcRatioPolicyTable[] =
{
    NVRM_AP15_CPU_EMC_RATIO_POLICY
};
+
+/*****************************************************************************/
+
+static void
+Ap15Emc2xFreqGet(
+ NvRmDeviceHandle hRmDevice)
+{
+ NvU32 reg;
+ NvRmFreqKHz SourceClockFreq;
+ NvRmModuleClockInfo* pCinfo = s_MemClocks.pEmcInfo;
+ NvRmModuleClockState* pCstate = s_MemClocks.pEmcState;
+
+ NV_ASSERT(pCinfo && pCstate);
+
+ // Determine EMC2x source and divider setting; update EMC2x clock state
+ reg = NV_REGR(hRmDevice,
+ NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkSourceOffset);
+ pCstate->Divider =
+ ((reg >> pCinfo->DivisorFieldShift) & pCinfo->DivisorFieldMask);
+ pCstate->SourceClock =
+ ((reg >> pCinfo->SourceFieldShift) & pCinfo->SourceFieldMask);
+ SourceClockFreq =
+ NvRmPrivGetClockSourceFreq(pCinfo->Sources[pCstate->SourceClock]);
+
+ // Fractional divider output = (Source Frequency * 2) / (divider + 2)
+ pCstate->actual_freq = ((SourceClockFreq << 1) / (pCstate->Divider + 2));
+}
+
// Enable/Disable EMC low-latency return-fifo reservation scheme
// (enable requires confirmation polling: the write is re-read until the
// register reflects the new value before proceeding)
#define NVRM_AP15_EMCLL_RETRSV_ENABLE \
do\
{\
    NvU32 reg; \
    reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, \
                  0, EMC_LL_ARB_CONFIG_0); \
    reg = NV_FLD_SET_DRF_DEF( \
        EMC, LL_ARB_CONFIG, LL_RETRSV_ENABLE, ENABLED, reg); \
    NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, \
            0, EMC_LL_ARB_CONFIG_0, reg); \
    while (reg != NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController,\
                          0, EMC_LL_ARB_CONFIG_0)) \
        ; \
} while(0)

// Disable variant: no confirmation polling is required
#define NVRM_AP15_EMCLL_RETRSV_DISABLE \
do\
{\
    NvU32 reg; \
    reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, \
                  0, EMC_LL_ARB_CONFIG_0); \
    reg = NV_FLD_SET_DRF_DEF( \
        EMC, LL_ARB_CONFIG, LL_RETRSV_ENABLE, DISABLED, reg); \
    NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, \
            0, EMC_LL_ARB_CONFIG_0, reg); \
} while (0)
+
// Prepare EMC for a CPU clock source switch: reserve EMC low-latency
// return-fifo entries (polls until the setting is confirmed in hardware)
void
NvRmPrivAp15SetEmcForCpuSrcSwitch(NvRmDeviceHandle hRmDevice)
{
    NVRM_AP15_EMCLL_RETRSV_ENABLE;
}
+
+void
+NvRmPrivAp15SetEmcForCpuDivSwitch(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz CpuFreq,
+ NvBool Before)
+{
+ NvRmFreqKHz EmcFreq = (s_MemClocks.pEmcState->actual_freq >> 1);
+ if (Before && (CpuFreq < EmcFreq))
+ {
+ NVRM_AP15_EMCLL_RETRSV_ENABLE;
+ }
+ else if (!Before && (CpuFreq >= EmcFreq))
+ {
+ NVRM_AP15_EMCLL_RETRSV_DISABLE;
+ }
+}
+
/*
 * Programs EMC shadow timing registers and triggers the update into the
 * active registers. Called twice per frequency change - once before and once
 * after the divider switch - so that parameters are applied in the safe
 * order: "overlap down" registers when (FreqRising == BeforeDividerChange),
 * "overlap up" registers otherwise. The write order and the completion
 * polling below are sequencing-critical; do not reorder.
 */
static void
Ap15EmcTimingSet(
    NvRmDeviceHandle hRmDevice,
    NvBool FreqRising,
    NvBool BeforeDividerChange,
    const NvRmAp15EmcTimingConfig* pEmcConfig)
{
    // Write shadow timing registers
    if (FreqRising == BeforeDividerChange) // "overlap down" parameters
    {
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING0_0, pEmcConfig->Timing0Reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING1_0, pEmcConfig->Timing1Reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING2_0, pEmcConfig->Timing2Reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING3_0, pEmcConfig->Timing3Reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING5_0, pEmcConfig->Timing5Reg);
    }
    else // "overlap up" parameters
    {
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING4_0, pEmcConfig->Timing4Reg);

        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_FBIO_CFG6_0, pEmcConfig->FbioCfg6Reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_FBIO_DQSIB_DLY_0, pEmcConfig->FbioDqsibDly);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_FBIO_QUSE_DLY_0, pEmcConfig->FbioQuseDly);
    }
    // Trigger active register update from shadow
    NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
            EMC_TIMING_CONTROL_0, 0x1);

    // Make sure update from shadow is completed (poll back the last
    // register written in the respective branch above)
    if (FreqRising == BeforeDividerChange)
    {
        while((NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
               EMC_TIMING0_0)) != pEmcConfig->Timing0Reg);
    }
    else
    {
        while((NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
               EMC_TIMING4_0)) != pEmcConfig->Timing4Reg);
        // Re-trigger active register update (need it for trimmers only)
        NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
                EMC_TIMING_CONTROL_0, 0x1);
    }
}
+
/*
 * Switches the EMC2x clock to the divisor in the given table entry,
 * bracketing the divider change with the two-phase timing register update
 * (see Ap15EmcTimingSet). The EMC clock source itself stays PLLM0 as set
 * by the bootloader. Updates the cached EMC clock state.
 */
static void
Ap15Emc2xClockSet(
    NvRmDeviceHandle hRmDevice,
    NvBool FreqRising,
    const NvRmAp15EmcTimingConfig* pEmcConfig)
{
    NvU32 reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                        CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
    reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
                             EMC_2X_CLK_DIVISOR, pEmcConfig->Emc2xDivisor, reg);
    NV_ASSERT(pEmcConfig->Emc2xKHz); // validate table entry

    // Update EMC state
    s_MemClocks.pEmcState->actual_freq = pEmcConfig->Emc2xKHz;
    s_MemClocks.pEmcState->Divider = pEmcConfig->Emc2xDivisor;

    // Set EMC parameters and EMC divisor (the EMC clock source is always
    // PLLM0 starting from BL)
    Ap15EmcTimingSet(hRmDevice, FreqRising, NV_TRUE, pEmcConfig);
    NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
            CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, reg);
    Ap15EmcTimingSet(hRmDevice, FreqRising, NV_FALSE, pEmcConfig);
}
+
/*
 * Switches the MC clock to the source/divisor in the given table entry.
 * Ordering is glitch-avoidance critical: the divisor is increased before a
 * source change and decreased after it, with a settling delay after every
 * register write. Updates the cached MC clock state.
 */
static void
Ap15McClockSet(
    NvRmDeviceHandle hRmDevice,
    const NvRmAp15EmcTimingConfig* pEmcConfig)
{
    NvU32 src, div;
    NvU32 reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                        CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0);
    src = NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_MEM, MEM_CLK_SRC, reg);
    div = NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_MEM, MEM_CLK_DIVISOR, reg);

    // Update MC state
    s_MemClocks.pMcState->actual_freq = pEmcConfig->McKHz;
    s_MemClocks.pMcState->SourceClock = pEmcConfig->McClockSource;
    s_MemClocks.pMcState->Divider = pEmcConfig->McDivisor;

    // Set MC divisor before source, if new value is bigger than the old one
    if (pEmcConfig->McDivisor > div)
    {
        reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_MEM,
                                 MEM_CLK_DIVISOR, pEmcConfig->McDivisor, reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }

    // Modify MC source if it is to be changed
    if (pEmcConfig->McClockSource != src)
    {
        // Re-attach voltage/source bookkeeping before the actual switch
        NvRmPrivMemoryClockReAttach(
            hRmDevice, s_MemClocks.pMcInfo, s_MemClocks.pMcState);
        reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_MEM,
                                 MEM_CLK_SRC, pEmcConfig->McClockSource, reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }

    // Set MC divisor after source, if new value is smaller than the old one
    if (pEmcConfig->McDivisor < div)
    {
        reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_MEM,
                                 MEM_CLK_DIVISOR, pEmcConfig->McDivisor, reg);
        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
                CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }
}
+
/*
 * Builds the sorted EMC DFS configuration table from the ODM-provided SDRAM
 * timing list and locates the entry matching the bootloader's current EMC
 * divisor.
 *
 * Preconditions checked here (any failure invalidates the table, which
 * disables EMC scaling): a non-empty ODM table of the expected revision,
 * PLLM0 within the supported EMC range, and PLLM0 selected by the
 * bootloader as the EMC clock source (RM never changes the EMC source).
 */
void
NvRmPrivAp15EmcConfigInit(NvRmDeviceHandle hRmDevice)
{
    NvU32 i, j, k, reg=0;
    NvU32 ConfigurationsCount;
    NvRmFreqKHz Emc2xKHz, McKHz, McMax;
    NvRmFreqKHz PllM0KHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
    // reg doubles as the returned table revision here
    const NvOdmSdramControllerConfig* pEmcConfigurations =
        NvOdmQuerySdramControllerConfigGet(&ConfigurationsCount, &reg);

    // Init memory configuration structure
    NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
        hRmDevice, NvRmPrivModuleID_ExternalMemoryController,
        &s_MemClocks.pEmcInfo, &s_MemClocks.pEmcState));
    NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
        hRmDevice, NvRmPrivModuleID_MemoryController,
        &s_MemClocks.pMcInfo, &s_MemClocks.pMcState));
    s_MemClocks.Index = NVRM_AP15_DFS_EMC_FREQ_STEPS;   // invalid index
    NvOsMemset(s_Ap15EmcConfigSortedTable, 0,           // clean table
               sizeof(s_Ap15EmcConfigSortedTable));

    // Get EMC2x clock state from h/w
    Ap15Emc2xFreqGet(hRmDevice);

    // Check if configuration table is provided by ODM
    if ((ConfigurationsCount == 0) || (pEmcConfigurations == NULL))
    {
        s_Ap15EmcConfigSortedTable[0].Emc2xKHz = 0; // invalidate PLLM0 entry
        return;
    }
    if (reg != NV_EMC_BASIC_REV)
    {
        s_Ap15EmcConfigSortedTable[0].Emc2xKHz = 0; // invalidate PLLM0 entry
        NV_ASSERT(!"Invalid configuration table revision");
        return;
    }

    // Check PLLM0 range
    NV_ASSERT(PllM0KHz);
    if (PllM0KHz > (NvRmPrivGetSocClockLimits(
        NvRmPrivModuleID_ExternalMemoryController)->MaxKHz))
    {
        s_Ap15EmcConfigSortedTable[0].Emc2xKHz = 0; // invalidate PLLM0 entry
        NV_ASSERT(!"PLLM0 is outside supported EMC range");
        return;
    }

    // Check if PLLM0 is configured by boot loader as EMC clock source
    // (it can not and will not be changed by RM)
    if (s_MemClocks.pEmcState->SourceClock !=
        CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLM_OUT0)
    {
        s_Ap15EmcConfigSortedTable[0].Emc2xKHz = 0; // invalidate PLLM0 entry
        NV_ASSERT(!"Other than PLLM0 clock source is used for EMC");
        return;
    }

    // Sort list of EMC timing parameters in descending order of frequencies
    // evenly divided down from PLLM0; find matching entry for boot divisor.
    // i indexes the sorted output table; k is the PLLM0 division step
    // (Emc2xKHz = PllM0KHz / (2 * k), k = 0 meaning PLLM0 itself).
    for (i = 0, k = 0, Emc2xKHz = PllM0KHz; i < NVRM_AP15_DFS_EMC_FREQ_STEPS; )
    {
        s_Ap15EmcConfigSortedTable[i].Emc2xKHz = 0; // mark entry invalid
        for (j = 0; j < ConfigurationsCount; j++)
        {
            // Find match with 1MHz tolerance for allowed configuration
            if ((Emc2xKHz <= (pEmcConfigurations[j].SdramKHz * 2 + 1000)) &&
                (Emc2xKHz >= (pEmcConfigurations[j].SdramKHz * 2 - 1000)))
            {
                s_Ap15EmcConfigSortedTable[i].Timing0Reg = pEmcConfigurations[j].EmcTiming0;
                s_Ap15EmcConfigSortedTable[i].Timing1Reg = pEmcConfigurations[j].EmcTiming1;
                s_Ap15EmcConfigSortedTable[i].Timing2Reg = pEmcConfigurations[j].EmcTiming2;
                s_Ap15EmcConfigSortedTable[i].Timing3Reg = pEmcConfigurations[j].EmcTiming3;
                s_Ap15EmcConfigSortedTable[i].Timing4Reg = pEmcConfigurations[j].EmcTiming4;
                s_Ap15EmcConfigSortedTable[i].Timing5Reg = pEmcConfigurations[j].EmcTiming5;

                s_Ap15EmcConfigSortedTable[i].FbioCfg6Reg =
                    pEmcConfigurations[j].EmcFbioCfg6;
                s_Ap15EmcConfigSortedTable[i].FbioDqsibDly =
                    pEmcConfigurations[j].EmcFbioDqsibDly +
                    NvRmPrivGetEmcDqsibOffset(hRmDevice);
                s_Ap15EmcConfigSortedTable[i].FbioQuseDly =
                    pEmcConfigurations[j].EmcFbioQuseDly;
                s_Ap15EmcConfigSortedTable[i].CoreVoltageMv =
                    pEmcConfigurations[j].EmcCoreVoltageMv;

                // Determine EMC and MC clock divisors, MC clock source
                // (EMC always uses PLLM0 as a source), and CPU clock limit
                s_Ap15EmcConfigSortedTable[i].Emc2xKHz = Emc2xKHz; // accurate KHz
                if (i == 0)
                {
                    /*
                     * The first table entry specifies parameters for EMC2xFreq
                     * = PLLM0 frequency; the divisor field in EMC fractional
                     * divider register is set to "0". The divisor field in MC
                     * divider is set to "1", so that Emc1xFreq ~ 75% of McFreq
                     * using PLLM0 as MC clock source, if maximum MC frequency
                     * limit is not violated. Otherwise, find the highest MC
                     * frequency below the limit with PLLP0 as a source.
                     */
                    s_Ap15EmcConfigSortedTable[i].Emc2xDivisor = 0;
                    McKHz = (PllM0KHz * 2) / 3;
                    McMax = NvRmPrivGetSocClockLimits(
                        NvRmPrivModuleID_MemoryController)->MaxKHz;
                    NV_ASSERT(McMax);
                    if (McKHz <= McMax)
                    {
                        s_Ap15EmcConfigSortedTable[i].McDivisor = 1;
                        s_Ap15EmcConfigSortedTable[i].McClockSource =
                            CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0_MEM_CLK_SRC_PLLM_OUT0;
                    }
                    else if (NVRM_PLLP_FIXED_FREQ_KHZ <= McMax)
                    {
                        McKHz = NVRM_PLLP_FIXED_FREQ_KHZ;
                        s_Ap15EmcConfigSortedTable[i].McDivisor = 0;
                        s_Ap15EmcConfigSortedTable[i].McClockSource =
                            CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0_MEM_CLK_SRC_PLLP_OUT0;
                    }
                    else
                    {
                        // reg is reused here as the PLLP fractional divide
                        // ratio (2 * PLLP / McKHz, rounded up)
                        reg = (2 * NVRM_PLLP_FIXED_FREQ_KHZ + McMax - 1) / McMax;
                        McKHz = (2 * NVRM_PLLP_FIXED_FREQ_KHZ) / reg;
                        s_Ap15EmcConfigSortedTable[i].McDivisor = reg - 2;
                        s_Ap15EmcConfigSortedTable[i].McClockSource =
                            CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0_MEM_CLK_SRC_PLLP_OUT0;
                    }
                }
                else
                {
                    /*
                     * If i = 1, 2, ... the table entry specifies parameters
                     * for EMC2xFreq = PLLM0 frequency/(2 * k); the divisor
                     * field in EMC fractional divider register should be set
                     * as 2 * (2 * k) - 2 = 4 * k - 2. The divisor field in MC
                     * divider is determined so that Emc1xFreq ~ 85% of McFreq
                     * using the same PLLM0 as MC clock source
                     */
                    s_Ap15EmcConfigSortedTable[i].Emc2xDivisor = (k << 2) - 2;
                    s_Ap15EmcConfigSortedTable[i].McDivisor = (19 +
                        17 * s_Ap15EmcConfigSortedTable[i].Emc2xDivisor) / 10;
                    s_Ap15EmcConfigSortedTable[i].McClockSource =
                        CLK_RST_CONTROLLER_CLK_SOURCE_MEM_0_MEM_CLK_SRC_PLLM_OUT0;
                    McKHz = 2 * PllM0KHz /
                        (s_Ap15EmcConfigSortedTable[i].McDivisor + 2);
                }
                if (s_Ap15EmcConfigSortedTable[i].Emc2xDivisor ==
                    s_MemClocks.pEmcState->Divider)
                {
                    s_MemClocks.Index = i; // Boot configuration found
                }
                s_Ap15EmcConfigSortedTable[i].McKHz = McKHz;
                /*
                 * H/w CPU clock limit is determined from inequality:
                 * 1 mcclk period + 12 cpuclk periods >= 2 emcclck periods, or
                 * CpuKHz <= 11.9 * McKHz * Emc2xKHz / (4 * McKHz - Emc2xKHz)
                 * with 0.1/12 ~ 0.8% margin
                 * S/w CPU clock limit is determined per s/w policy:
                 * CpuKHz <= CpuMax * PolicyTable[PLLM0/(2*EMC2xKHz)] / 256
                 * Final CPU clock limit is minimum of the above limits
                 */
                s_Ap15EmcConfigSortedTable[i].CpuLimitKHz =
                    (NvU32)NvDiv64(((NvU64)Emc2xKHz * McKHz * 119),
                                   (((McKHz << 2) - Emc2xKHz) * 10));
                // reg is reused here as the CPU maximum frequency
                reg = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
                if (k != 0)
                {
                    NV_ASSERT(k < NV_ARRAY_SIZE(s_Cpu2EmcRatioPolicyTable));
                    reg = (reg * s_Cpu2EmcRatioPolicyTable[k]) >> 8;
                }
                if (s_Ap15EmcConfigSortedTable[i].CpuLimitKHz > reg)
                    s_Ap15EmcConfigSortedTable[i].CpuLimitKHz = reg;

                break;
            }
        }
        if (s_Ap15EmcConfigSortedTable[i].Emc2xKHz != 0)
            i++;        // Entry found - advance sorting index
        else if (i == 0)
            break;      // PLLM0 entry not found - abort sorting

        Emc2xKHz = PllM0KHz / ((++k) << 1);
        if (Emc2xKHz < NvRmPrivGetSocClockLimits(
            NvRmPrivModuleID_ExternalMemoryController)->MinKHz)
            break;      // Abort sorting at minimum EMC frequency
    }
    // Check if match for boot configuration found
    if (s_MemClocks.Index == NVRM_AP15_DFS_EMC_FREQ_STEPS)
        s_Ap15EmcConfigSortedTable[0].Emc2xKHz = 0; // invalidate PLLM0 entry
}
+
+/*
+ * Selects the EMC 2x clock source entry for the requested DFS target.
+ *
+ * hRmDevice     - RM device handle (unused here; kept for API symmetry)
+ * MaxKHz        - EMC 2x domain maximum frequency
+ * DomainKHz     - requested EMC 2x domain frequency
+ * pCpuTargetKHz - in/out: CPU target, clipped down to the CPU limit of the
+ *                 EMC configuration entry that gets selected
+ * pDfsSource    - out: selected source descriptor; DividerSetting holds an
+ *                 index into s_Ap15EmcConfigSortedTable, not a h/w divider
+ *
+ * Returns NV_TRUE when the selected entry reaches the target in this step,
+ * NV_FALSE when more (adjacent, one-at-a-time) steps are still required.
+ */
+static NvBool
+Ap15Emc2xClockSourceFind(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz DomainKHz,
+    NvRmFreqKHz* pCpuTargetKHz,
+    NvRmDfsSource* pDfsSource)
+{
+    NvU32 i;
+    NvBool FinalStep = NV_TRUE;
+    NV_ASSERT(DomainKHz <= MaxKHz);
+    pDfsSource->DividerSetting = 0; // no divider
+
+    // If PLLM0 entry in EMC frequencies table is invalid, EMC frequency
+    // will not be scaled; just fill in current EMC frequency
+    if (s_Ap15EmcConfigSortedTable[0].Emc2xKHz == 0)
+    {
+        pDfsSource->SourceId = NvRmClockSource_Invalid;
+        pDfsSource->SourceKHz = s_MemClocks.pEmcState->actual_freq;
+        pDfsSource->MinMv = NvRmVoltsMaximum; // no v-scaling in this case
+        return FinalStep;
+    }
+
+    // Only PLLM0 is used as EMC frequency source by DFS; its frequency is
+    // always within h/w limits
+    pDfsSource->SourceId = NvRmClockSource_PllM0;
+    NV_ASSERT(s_Ap15EmcConfigSortedTable[0].Emc2xKHz <= MaxKHz);
+
+    // Search sorted pre-defined EMC frequencies (divided down from PLLM0) for
+    // the entry above and closest to the target that also has CPU limit above
+    // the CPU target. Use PLLM0 entry (i == 0) if not found.
+    for (i = NVRM_AP15_DFS_EMC_FREQ_STEPS; i > 0;)
+    {
+        i--;
+        if ((DomainKHz <= s_Ap15EmcConfigSortedTable[i].Emc2xKHz) &&
+            (*pCpuTargetKHz <= s_Ap15EmcConfigSortedTable[i].CpuLimitKHz))
+            break;
+    }
+
+    // Make sure the new entry is adjacent to the current (one step at a time)
+    if (i > (s_MemClocks.Index + 1))
+    {
+        i = s_MemClocks.Index + 1;
+        FinalStep = NV_FALSE; // need more steps to reach target
+    }
+    else if ((i + 1) < s_MemClocks.Index)
+    {
+        i = s_MemClocks.Index - 1;
+        FinalStep = NV_FALSE; // need more steps to reach target
+    }
+
+    // Record found EMC entry, and limit CPU target if necessary
+    pDfsSource->DividerSetting = i;
+    pDfsSource->SourceKHz = s_Ap15EmcConfigSortedTable[i].Emc2xKHz;
+    if (*pCpuTargetKHz > s_Ap15EmcConfigSortedTable[i].CpuLimitKHz)
+        *pCpuTargetKHz = s_Ap15EmcConfigSortedTable[i].CpuLimitKHz;
+    pDfsSource->MinMv = s_Ap15EmcConfigSortedTable[i].CoreVoltageMv;
+    return FinalStep;
+}
+
+/*
+ * Applies the EMC 2x configuration previously selected by
+ * Ap15Emc2xClockSourceFind(), sequencing EMC vs. MC reconfiguration so that
+ * MC frequency never exceeds EMC 2x frequency.
+ *
+ * hRmDevice  - RM device handle
+ * MaxKHz     - EMC 2x domain maximum frequency (unused here)
+ * pDomainKHz - out: actual EMC 2x frequency set (always the source frequency)
+ * pDfsSource - selected source; DividerSetting is an index into
+ *              s_Ap15EmcConfigSortedTable
+ */
+static void
+Ap15Emc2xClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz* pDomainKHz,
+    const NvRmDfsSource* pDfsSource)
+{
+    NvU32 Index;
+    NvRmFreqKHz CpuFreq = NvRmPrivGetClockSourceFreq(NvRmClockSource_CpuBus);
+
+    // Always return the requested source frequency
+    *pDomainKHz = pDfsSource->SourceKHz;
+    NV_ASSERT(*pDomainKHz);
+
+    // If other than PLLM0 source is selected, EMC frequency is not scaled.
+    if (pDfsSource->SourceId != NvRmClockSource_PllM0)
+        return;
+
+    // Divider settings in EMC source descriptor is an index into the table of
+    // pre-defined EMC configurations in descending frequency order.
+    Index = pDfsSource->DividerSetting;
+    if (Index == s_MemClocks.Index)
+        return; // do nothing - new index is the same as current
+
+    // In case of EMC frequency increase: check if EMC LL reservation should
+    // be enabled, reconfigure EMC, then MC (make sure MC never exceeds EMC2x)
+    // In case of EMC frequency decrease: reconfigure MC, then EMC (make sure
+    // MC never exceeds EMC2x) and check if EMC LL reservation can be disabled
+    if (Index < s_MemClocks.Index)
+    {
+        // Lower index = higher frequency (table sorted in descending order)
+        if (CpuFreq < (*pDomainKHz >> 1))
+        {
+            NVRM_AP15_EMCLL_RETRSV_ENABLE;
+        }
+        Ap15Emc2xClockSet(
+            hRmDevice, NV_TRUE, &s_Ap15EmcConfigSortedTable[Index]);
+        Ap15McClockSet(hRmDevice, &s_Ap15EmcConfigSortedTable[Index]);
+    }
+    else
+    {
+        Ap15McClockSet(hRmDevice, &s_Ap15EmcConfigSortedTable[Index]);
+        Ap15Emc2xClockSet(
+            hRmDevice, NV_FALSE, &s_Ap15EmcConfigSortedTable[Index]);
+
+        if (CpuFreq >= (*pDomainKHz >> 1))
+        {
+            NVRM_AP15_EMCLL_RETRSV_DISABLE;
+        }
+    }
+    s_MemClocks.Index = Index;
+}
+
+/*
+ * One-time boot-path configuration that drives EMC/MC, System bus, and CPU
+ * clocks to their SoC maximums (valid only while core voltage is nominal).
+ * Compiled out for OAL builds.
+ */
+void
+NvRmPrivAp15FastClockConfig(NvRmDeviceHandle hRmDevice)
+{
+#if !NV_OAL
+    NvU32 divm1, divp2;
+    NvRmFreqKHz SclkKHz, CpuKHz, PllP2KHz, PllM1KHz;
+    NvRmFreqKHz FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+
+    // Set fastest EMC/MC configuration provided PLLM0 boot frequency matches
+    // one of the pre-defined configurations, i.e, it is the first entry in the
+    // sorted table
+    if (s_Ap15EmcConfigSortedTable[0].Emc2xKHz == FreqKHz)
+    {
+        // Step up one table entry at a time until the fastest (index 0)
+        // configuration is reached
+        for (;;)
+        {
+            Ap15Emc2xClockSet(
+                hRmDevice, NV_TRUE, &s_Ap15EmcConfigSortedTable[s_MemClocks.Index]);
+            Ap15McClockSet(hRmDevice, &s_Ap15EmcConfigSortedTable[s_MemClocks.Index]);
+            if (s_MemClocks.Index == 0)
+                break;
+            s_MemClocks.Index--;
+        }
+    }
+
+    // Set AVP/System Bus clock (now, with nominal core voltage it can be up
+    // to SoC maximum). First determine settings for PLLP and PLLM dividers
+    // to get maximum possible frequency on PLLP_OUT2 and PLLM_OUT1 outputs.
+    SclkKHz = NvRmPrivGetSocClockLimits(NvRmPrivModuleID_System)->MaxKHz;
+    NV_ASSERT(SclkKHz);
+
+    FreqKHz = NVRM_PLLP_FIXED_FREQ_KHZ;
+    PllP2KHz = SclkKHz;
+    divp2 = NvRmPrivFindFreqMaxBelow(
+        NvRmClockDivider_Fractional_2, FreqKHz, PllP2KHz, &PllP2KHz);
+
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+    PllM1KHz = SclkKHz;
+    divm1 = NvRmPrivFindFreqMaxBelow(
+        NvRmClockDivider_Fractional_2, FreqKHz, PllM1KHz, &PllM1KHz);
+
+    // Now configure both dividers and select the output with highest frequency
+    // as a source for the system bus clock; reconfigure MIO as necessary
+    // (MIO timing is retuned before the raise, or after the drop, so async
+    // memory timing never violates the faster of the two bus frequencies)
+    SclkKHz = NV_MAX(PllM1KHz, PllP2KHz);
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_SystemBus);
+    if (FreqKHz < SclkKHz)
+    {
+        Ap15MioReconfigure(hRmDevice, SclkKHz);
+    }
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP2)->pInfo.pDivider,
+        divp2);
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllM1)->pInfo.pDivider,
+        divm1);
+    if (SclkKHz == PllP2KHz)
+    {
+        NvRmPrivCoreClockSet(hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore,
+            NvRmClockSource_PllP2, 0, 0);
+    }
+    else
+    {
+        NvRmPrivCoreClockSet(hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore,
+            NvRmClockSource_PllM1, 0, 0);
+    }
+    if (FreqKHz >= SclkKHz)
+    {
+        Ap15MioReconfigure(hRmDevice, SclkKHz);
+    }
+    NvRmPrivBusClockInit(hRmDevice, SclkKHz);
+
+    // Set PLLC and CPU clock to SoC maximum - can be done now, when core
+    // voltage is guaranteed to be nominal, provided none of the display
+    // heads is already using PLLC as pixel clock source.
+    CpuKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+    if (CpuKHz != FreqKHz)
+    {
+        NvRmPrivBoostPllC(hRmDevice);
+    }
+    NvRmPrivCoreClockSet(hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore,
+        NvRmClockSource_PllC0, 0, 0);
+#endif
+}
+
+/*
+ * Clips CPU and EMC high ("throttling") limits to the nearest achievable EMC
+ * configuration: EMC is floored to a table entry, then the CPU limit is
+ * restricted by that entry's CpuLimitKHz. Both limits are kept at or above
+ * their respective DFS domain minimums. Note: pEmcHighKHz and the table are
+ * in EMC 1x units here (table stores 2x, hence the >> 1). No-op for OAL
+ * builds or when EMC scaling is disabled.
+ */
+void
+NvRmPrivAp15ClipCpuEmcHighLimits(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz* pCpuHighKHz,
+    NvRmFreqKHz* pEmcHighKHz)
+{
+#if !NV_OAL
+    NvU32 i;
+    NvRmFreqKHz EmcKHz;
+    NvRmFreqKHz MinKHz = NvRmPrivDfsGetMinKHz(NvRmDfsClockId_Emc);
+    NV_ASSERT(pEmcHighKHz && pCpuHighKHz);
+
+    // Nothing to do if no EMC scaling.
+    if (s_Ap15EmcConfigSortedTable[0].Emc2xKHz == 0)
+        return;
+
+    // Clip strategy: "throttling" - find the floor for EMC high limit
+    // (above domain minimum, of course)
+    if ((*pEmcHighKHz) < MinKHz)
+        *pEmcHighKHz = MinKHz;
+    for (i = 0; i < NVRM_AP15_DFS_EMC_FREQ_STEPS; i++)
+    {
+        EmcKHz = s_Ap15EmcConfigSortedTable[i].Emc2xKHz >> 1;
+        if (EmcKHz <= (*pEmcHighKHz))
+            break;
+    }
+    // No entry at or below the limit, or found entry is below domain
+    // minimum - back up to the previous (higher-frequency) entry
+    if ((i == NVRM_AP15_DFS_EMC_FREQ_STEPS) || (EmcKHz < MinKHz))
+    {
+        i--;
+        EmcKHz = s_Ap15EmcConfigSortedTable[i].Emc2xKHz >> 1;
+    }
+    *pEmcHighKHz = EmcKHz;
+
+    // Clip strategy: "throttling" - restrict CPU high limit by EMC
+    // configuration (above domain minimum, of course)
+    if ((*pCpuHighKHz) > s_Ap15EmcConfigSortedTable[i].CpuLimitKHz)
+        (*pCpuHighKHz) = s_Ap15EmcConfigSortedTable[i].CpuLimitKHz;
+    if ((*pCpuHighKHz) < NvRmPrivDfsGetMinKHz(NvRmDfsClockId_Cpu))
+        *pCpuHighKHz = NvRmPrivDfsGetMinKHz(NvRmDfsClockId_Cpu);
+#endif
+}
+
+/*
+ * Returns the frequency (in KHz) for a module whose clock is synchronized
+ * with EMC speed, derived as a fixed ratio of the PLLM0 frequency.
+ * Only 2D/EPP (dynamic, based on current EMC divider) and GraphicsHost
+ * (static, set by BCT) are valid; other modules assert and fall back to
+ * their SoC maximum.
+ */
+NvRmFreqKHz
+NvRmPrivAp15GetEmcSyncFreq(
+    NvRmDeviceHandle hRmDevice,
+    NvRmModuleID Module)
+{
+    NvRmFreqKHz FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+
+    switch (Module)
+    {
+        case NvRmModuleID_2D:
+        case NvRmModuleID_Epp:
+            // 2D/EPP frequency is dynamically synchronized with current EMC speed
+            // (high if EMC divisor 0, and low otherwise)
+            if (s_MemClocks.pEmcState && (s_MemClocks.pEmcState->Divider != 0))
+                FreqKHz = FreqKHz / NVRM_PLLM_2D_LOW_SPEED_RATIO;
+            else
+                FreqKHz = FreqKHz / NVRM_PLLM_2D_HIGH_SPEED_RATIO;
+            break;
+
+        case NvRmModuleID_GraphicsHost:
+            // Host frequency is static, synchronized with EMC range set by BCT
+            FreqKHz = FreqKHz / NVRM_PLLM_HOST_SPEED_RATIO;
+            break;
+
+        default:
+            NV_ASSERT(!"Invalid module for EMC synchronization");
+            FreqKHz = NvRmPrivGetSocClockLimits(Module)->MaxKHz;
+            break;
+    }
+    return FreqKHz;
+}
+
+/*****************************************************************************/
+
+/*
+ * Selects a clock source for the System bus DFS domain, trying in order:
+ * oscillator (ClkM), doubler (ClkD), PLLP divider per policy table, and
+ * finally the fixed PLLM_OUT1 divider. Fills pDfsSource with the chosen
+ * source, its frequency, divider setting, and the minimum core voltage
+ * required to run it.
+ */
+static void
+Ap15SystemClockSourceFind(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz DomainKHz,
+    NvRmDfsSource* pDfsSource)
+{
+    NvU32 i;
+    NvRmFreqKHz SourceKHz;
+    NV_ASSERT(DomainKHz <= MaxKHz);
+    pDfsSource->DividerSetting = 0; // no divider
+
+    // 1st try oscillator
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkM;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    // 2nd choice - doubler
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkD);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkD;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    /*
+     * 3rd option - PLLP divider per policy specification. Find
+     * the policy entry with source frequency closest and above requested.
+     * If requested frequency exceeds all policy options within domain
+     * maximum limit, select the entry with the highest possible frequency.
+     */
+    for (i = 0; i < s_Ap15PllPSystemClockPolicyEntries; i++)
+    {
+        SourceKHz = s_Ap15PllPSystemClockPolicy[i].SourceKHz;
+        if (SourceKHz > MaxKHz)
+        {
+            NV_ASSERT(i);
+            i--;
+            break;
+        }
+        if (DomainKHz <= SourceKHz)
+        {
+            break;
+        }
+    }
+    if (i == s_Ap15PllPSystemClockPolicyEntries)
+    {
+        i--; // last/highest source is the best we can do
+    }
+    pDfsSource->SourceId = s_Ap15PllPSystemClockPolicy[i].SourceId;
+    pDfsSource->SourceKHz = s_Ap15PllPSystemClockPolicy[i].SourceKHz;
+    pDfsSource->DividerSetting = s_Ap15PllPSystemClockPolicy[i].DividerSetting;
+
+    /*
+     * 4th and final option - PLLM divider fixed at maximum possible frequency
+     * during initialization. Select PLLP/PLLM divider according to the
+     * following rule: select the divider with smaller frequency if it is equal
+     * or above the target frequency, otherwise select the divider with bigger
+     * output frequency.
+     */
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM1);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (SourceKHz > pDfsSource->SourceKHz)
+    {
+        if (pDfsSource->SourceKHz >= DomainKHz)
+            goto get_mv; // keep PLLP divider as a source
+    }
+    else // SourceKHz <= pDfsSource->SourceKHz
+    {
+        if (SourceKHz < DomainKHz)
+            goto get_mv; // keep PLLP divider as a source
+    }
+    // Select PLLM_OUT1 divider as a source (considered as a fixed source -
+    // divider settings are ignored)
+    pDfsSource->SourceId = NvRmClockSource_PllM1;
+    pDfsSource->SourceKHz = SourceKHz;
+
+get_mv:
+    // Finally get operational voltage for found source
+    pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
+        hRmDevice, NvRmPrivModuleID_System, pDfsSource->SourceKHz);
+}
+
+/*
+ * Selects a clock source for the CPU DFS domain, trying in order:
+ * oscillator (ClkM), PLLP divider per policy table, PLLM0 base output, and
+ * finally PLLC0 at the domain maximum. Fills pDfsSource with the chosen
+ * source, its frequency, divider setting, and the minimum core voltage
+ * required to run it.
+ */
+static void
+Ap15CpuClockSourceFind(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz DomainKHz,
+    NvRmDfsSource* pDfsSource)
+{
+    NvU32 i;
+    NvRmFreqKHz SourceKHz;
+    NV_ASSERT(DomainKHz <= MaxKHz);
+    pDfsSource->DividerSetting = 0; // no divider
+
+    // 1st try oscillator
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkM;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    // 2nd choice - doubler - no longer supported
+    // 3rd choice - PLLP divider per policy specification
+    SourceKHz =
+        s_Ap15PllPCpuClockPolicy[s_Ap15PllPCpuClockPolicyEntries-1].SourceKHz;
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        // The requested frequency is within PLLP divider policy table, and all
+        // policy entries are within domain maximum limit. Then, find the entry
+        // with source frequency closest and above the requested.
+        for (i = 0; i < s_Ap15PllPCpuClockPolicyEntries; i++)
+        {
+            SourceKHz = s_Ap15PllPCpuClockPolicy[i].SourceKHz;
+            if (DomainKHz <= SourceKHz)
+                break;
+        }
+        if (s_Ap15PllPCpuClockPolicy[i].DividerSetting == 0)
+            pDfsSource->SourceId = NvRmClockSource_PllP0; // Bypass 1:1 divider
+        else
+            pDfsSource->SourceId = s_Ap15PllPCpuClockPolicy[i].SourceId;
+        pDfsSource->SourceKHz = s_Ap15PllPCpuClockPolicy[i].SourceKHz;
+        pDfsSource->DividerSetting = s_Ap15PllPCpuClockPolicy[i].DividerSetting;
+        goto get_mv;
+    }
+
+    // 4th choice PLLM base output
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_PllM0;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    // 5th choice PLLP base output (not used - covered by 3rd choice, case 1:1)
+    // 6th and final choice - PLLC base output at domain limit
+    pDfsSource->SourceId = NvRmClockSource_PllC0;
+    pDfsSource->SourceKHz = MaxKHz;
+
+get_mv:
+    // Finally get operational voltage for found source
+    pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
+        hRmDevice, NvRmModuleID_Cpu, pDfsSource->SourceKHz);
+}
+
+/*
+ * Applies the System bus clock source selected by Ap15SystemClockSourceFind():
+ * reconfigures the PLLP2 variable divider when it is the chosen source, then
+ * switches the System core clock to the source. pDomainKHz is updated with
+ * the frequency actually achieved.
+ */
+static void
+Ap15SystemBusClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz* pDomainKHz,
+    const NvRmDfsSource* pDfsSource)
+{
+    NvRmClockSource SourceId = pDfsSource->SourceId;
+    const NvRmCoreClockInfo* pCinfo =
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+
+    switch(SourceId)
+    {
+        case NvRmClockSource_PllP2:
+            // Reconfigure PLLP variable divider if it is used as a source
+            NvRmPrivDividerSet(hRmDevice,
+                NvRmPrivGetClockSourceHandle(SourceId)->pInfo.pDivider,
+                pDfsSource->DividerSetting);
+            // fall through
+        case NvRmClockSource_PllM1:
+        case NvRmClockSource_ClkD:
+        case NvRmClockSource_ClkM:
+            break; // fixed sources - do nothing
+        default:
+            NV_ASSERT(!"Invalid source (per policy)");
+    }
+    NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+        hRmDevice, pCinfo, MaxKHz, pDomainKHz, &SourceId));
+}
+
+/*
+ * Applies the CPU bus clock source selected by Ap15CpuClockSourceFind():
+ * configures PLLC if it is the chosen source and currently disabled,
+ * reconfigures the PLLP4 variable divider if that is the source (preparing
+ * EMC first when the source frequency drops), then switches the CPU core
+ * clock. pDomainKHz is updated with the frequency actually achieved.
+ */
+static void
+Ap15CpuBusClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz* pDomainKHz,
+    const NvRmDfsSource* pDfsSource)
+{
+    NvRmClockSource SourceId = pDfsSource->SourceId;
+    const NvRmCoreClockInfo* pCinfo =
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+
+    switch(SourceId)
+    {
+        case NvRmClockSource_PllC0:
+            // DFS PLLC policy - configure PLLC if disabled; otherwise keep
+            // it as is (the latter means either DFS has already set it
+            // to domain limit, or PLLC is used as display pixel clock source)
+            if (NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0) <=
+                NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM))
+            {
+                NvRmFreqKHz TargetKHz = pDfsSource->SourceKHz;
+                NvRmPrivAp15PllConfigureSimple(
+                    hRmDevice, SourceId, MaxKHz, &TargetKHz);
+            }
+            break;
+        case NvRmClockSource_PllP4:
+            // Reconfigure PLLP variable divider if it is used as a source;
+            // If source frequency is going down, make sure EMC configuration
+            // is ready for the CPU source switch
+            if (pDfsSource->SourceKHz < NvRmPrivGetClockSourceFreq(SourceId))
+                NvRmPrivAp15SetEmcForCpuSrcSwitch(hRmDevice);
+            NvRmPrivDividerSet(hRmDevice,
+                NvRmPrivGetClockSourceHandle(SourceId)->pInfo.pDivider,
+                pDfsSource->DividerSetting);
+            // fall through
+        case NvRmClockSource_PllP0:
+        case NvRmClockSource_PllM0:
+        case NvRmClockSource_ClkD:
+        case NvRmClockSource_ClkM:
+            break; // fixed sources - do nothing
+        default:
+            NV_ASSERT(!"Invalid source (per policy)");
+    }
+    NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+        hRmDevice, pCinfo, MaxKHz, pDomainKHz, &SourceId));
+}
+
+/*****************************************************************************/
+/* If time is specified in ns, and frequency in KHz, then cycles =
+ * (ns * KHz / 10^6) = (ns * (KHz * 2^20 / 10^6) / 2^20) = (ns * KiHz / 2^20),
+ * where KiHz = (KHz * 2^20 / 10^6) ~ (KHz * 4295 / 4096) with error < 0.001%.
+ * The result is rounded up to the next whole cycle.
+ */
+#define NVRM_TIME_TO_CYCLES(ns, KiHz) (((ns * KiHz) + (0x1 << 20)- 1) >> 20)
+
+/* Like NV_DRF_NUM, but saturates n at the field's maximum value instead of
+ * silently truncating high-order bits when n does not fit in the field.
+ */
+#define NV_DRF_MAX_NUM(d,r,f,n) \
+    ((((n) <= NV_FIELD_MASK(d##_##r##_0_##f##_RANGE)) ? \
+        (n) : NV_FIELD_MASK(d##_##r##_0_##f##_RANGE)) << \
+        NV_FIELD_SHIFT(d##_##r##_0_##f##_RANGE))
+
+/*
+ * Retunes MIO asynchronous memory timing (read/write access and dead times,
+ * expressed in clock cycles) for the given MIO clock frequency. Updates the
+ * APB_MISC_PP_XMB_MIO_CFG register fields for CS1/MIO_B and CS3/MIO_A when
+ * the corresponding ODM config exists; field values saturate at the field
+ * maximum via NV_DRF_MAX_NUM.
+ */
+static void
+Ap15MioReconfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MioKHz)
+{
+    NvU32 reg, mask;
+    // Convert KHz to "KiHz" (KHz * 2^20 / 10^6) for NVRM_TIME_TO_CYCLES
+    NvU32 MioKiHz = ((MioKHz * 4295) >> 12);
+    NvOdmAsynchMemConfig MemConfig;
+    /*
+     * Reconfigure MIO timing when clock frequency changes. Check only Async
+     * Memory devices connected to CS1/MIO_B and CS3/MIO_A (CS0 is dedicated
+     * for NOR, we do not care after boot, and CS2 is dedicated to SDRAM with
+     * its own clock)
+     */
+    if (NvOdmQueryAsynchMemConfig(1, &MemConfig) == NV_TRUE)
+    {
+        reg = NV_REGR(hRmDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_XMB_MIO_CFG_0);
+        mask =
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_WR_DEAD_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_WR_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_RD_DEAD_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_RD_TIME, 0xFFFFFFFFUL);
+        reg = (reg & (~mask)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_WR_DEAD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.WriteDeadTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_WR_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.WriteAccessTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_RD_DEAD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.ReadDeadTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_B_RD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.ReadAccessTime, MioKiHz));
+        NV_REGW(hRmDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_XMB_MIO_CFG_0, reg);
+    }
+    if (NvOdmQueryAsynchMemConfig(3, &MemConfig) == NV_TRUE)
+    {
+        reg = NV_REGR(hRmDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_XMB_MIO_CFG_0);
+        mask =
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_WR_DEAD_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_WR_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_RD_DEAD_TIME, 0xFFFFFFFFUL) |
+            NV_DRF_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_RD_TIME, 0xFFFFFFFFUL);
+        reg = (reg & (~mask)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_WR_DEAD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.WriteDeadTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_WR_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.WriteAccessTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_RD_DEAD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.ReadDeadTime, MioKiHz)) |
+            NV_DRF_MAX_NUM(APB_MISC_PP, XMB_MIO_CFG, MIO_A_RD_TIME,
+                NVRM_TIME_TO_CYCLES(MemConfig.ReadAccessTime, MioKiHz));
+        NV_REGW(hRmDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_XMB_MIO_CFG_0, reg);
+    }
+}
+
+/*
+ * Top-level DFS reconfiguration for all clock domains in one pass:
+ * derives the System bus target from the bus-domain requests, selects
+ * sources for System, EMC 2x and CPU, scales core voltage up before the
+ * switch and down after it (non-OAL builds), and applies the new settings
+ * in a CPU/EMC order that depends on the CPU frequency direction.
+ *
+ * pMaxKHz - per-domain maximum frequencies
+ * pDfsKHz - in/out: requested frequencies on entry, achieved on return
+ *
+ * Returns the status from Ap15Emc2xClockSourceFind(): NV_TRUE when the EMC
+ * target was reached, NV_FALSE when more stepwise calls are needed.
+ */
+NvBool NvRmPrivAp15DfsClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmDfsFrequencies* pMaxKHz,
+    NvRmDfsFrequencies* pDfsKHz)
+{
+    NvU32 i;
+    NvBool Status;
+    NvRmFreqKHz FreqKHz;
+    NvRmDfsSource CpuClockSource;
+    NvRmDfsSource SystemClockSource;
+    NvRmDfsSource Emc2xClockSource;
+    // Direction of the CPU frequency change decides CPU vs. EMC ordering below
+    NvBool CpuKHzUp = pDfsKHz->Domains[NvRmDfsClockId_Cpu] >
+        NvRmPrivGetClockSourceFreq(NvRmClockSource_CpuBus);
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pMaxKHz && pDfsKHz);
+
+    /*
+     * Adjust System bus core clock. It should be sufficient to supply AVP,
+     * and all bus clocks. Also make sure that AHB bus frequency is above
+     * the one requested for APB clock.
+     */
+    for (FreqKHz = 0, i = 1; i < NvRmDfsClockId_Num; i++)
+    {
+        if ((i != NvRmDfsClockId_Cpu) &&
+            (i != NvRmDfsClockId_Emc))
+        {
+            FreqKHz = (FreqKHz > pDfsKHz->Domains[i]) ?
+                FreqKHz : pDfsKHz->Domains[i];
+        }
+    }
+    pDfsKHz->Domains[NvRmDfsClockId_System] = FreqKHz;
+
+#if LIMIT_SYS_TO_VDE_RATIO
+    // Keep v-pipe at no less than System/2^(ratio-1)
+    if (pDfsKHz->Domains[NvRmDfsClockId_Vpipe] <
+        (FreqKHz >> (LIMIT_SYS_TO_VDE_RATIO - 1)))
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Vpipe] =
+            (FreqKHz >> (LIMIT_SYS_TO_VDE_RATIO - 1));
+    }
+#endif
+
+#if LIMIT_SYS_TO_AHB_APB_RATIOS
+    // Keep APB at no less than System/4, and AHB at no less than System/2
+    if (pDfsKHz->Domains[NvRmDfsClockId_Apb] < (FreqKHz >> 2))
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Apb] = (FreqKHz >> 2);
+    }
+    if (pDfsKHz->Domains[NvRmDfsClockId_Ahb] < (FreqKHz >> 1))
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Ahb] = (FreqKHz >> 1);
+    }
+#endif
+    if (pDfsKHz->Domains[NvRmDfsClockId_Ahb] <
+        pDfsKHz->Domains[NvRmDfsClockId_Apb])
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Ahb] =
+            pDfsKHz->Domains[NvRmDfsClockId_Apb];
+    }
+
+    // Find clock sources for CPU, System and Memory clocks. H/w requirement
+    // to increase memory clocks in steps, may limit CPU clock as well
+    Ap15SystemClockSourceFind(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_System],
+        pDfsKHz->Domains[NvRmDfsClockId_System],
+        &SystemClockSource);
+    Status = Ap15Emc2xClockSourceFind(hRmDevice,
+        (pMaxKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        (pDfsKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        &pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+        &Emc2xClockSource);
+    Ap15CpuClockSourceFind(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_Cpu],
+        pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+        &CpuClockSource);
+
+#if !NV_OAL
+    // Adjust core voltage for the new clock sources before actual change
+    NvRmPrivVoltageScale(NV_TRUE, CpuClockSource.MinMv,
+        SystemClockSource.MinMv, Emc2xClockSource.MinMv);
+#endif
+
+    // Configure System bus and derived clocks. Note that APB is the only
+    // clock in system complex that may have different (lower) maximum
+    // limit - pass it explicitly to set function.
+    if (FreqKHz < NvRmPrivGetClockSourceFreq(NvRmClockSource_SystemBus))
+        FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_SystemBus);
+    Ap15MioReconfigure(hRmDevice, FreqKHz); // MIO timing for max frequency
+    Ap15SystemBusClockConfigure(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_System],
+        &pDfsKHz->Domains[NvRmDfsClockId_System],
+        &SystemClockSource);
+    pDfsKHz->Domains[NvRmDfsClockId_Avp] =
+        pDfsKHz->Domains[NvRmDfsClockId_System]; // no AVP clock skipping
+    NvRmPrivBusClockFreqSet(hRmDevice,
+        pDfsKHz->Domains[NvRmDfsClockId_System],
+        &pDfsKHz->Domains[NvRmDfsClockId_Vpipe],
+        &pDfsKHz->Domains[NvRmDfsClockId_Ahb],
+        &pDfsKHz->Domains[NvRmDfsClockId_Apb],
+        pMaxKHz->Domains[NvRmDfsClockId_Apb]);
+    // Retune MIO timing for the final AHB frequency
+    Ap15MioReconfigure(hRmDevice, pDfsKHz->Domains[NvRmDfsClockId_Ahb]);
+
+    // Configure CPU core clock before Memory if CPU frequency goes down
+    if (!CpuKHzUp)
+    {
+        Ap15CpuBusClockConfigure(hRmDevice,
+            pMaxKHz->Domains[NvRmDfsClockId_Cpu],
+            &pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+            &CpuClockSource);
+    }
+    // Configure Memory clocks and convert frequency to DFS EMC 1x domain
+    FreqKHz = pDfsKHz->Domains[NvRmDfsClockId_Emc] << 1;
+    Ap15Emc2xClockConfigure(hRmDevice,
+        (pMaxKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        &FreqKHz, &Emc2xClockSource);
+    pDfsKHz->Domains[NvRmDfsClockId_Emc] = FreqKHz >> 1;
+    // Configure CPU core clock after Memory if CPU frequency goes up
+    if (CpuKHzUp)
+    {
+        Ap15CpuBusClockConfigure(hRmDevice,
+            pMaxKHz->Domains[NvRmDfsClockId_Cpu],
+            &pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+            &CpuClockSource);
+    }
+
+#if !NV_OAL
+    // Adjust core voltage for the new clock sources after actual change
+    NvRmPrivVoltageScale(NV_FALSE, CpuClockSource.MinMv,
+        SystemClockSource.MinMv, Emc2xClockSource.MinMv);
+#endif
+    return Status;
+}
+
+/*
+ * Reads back the current frequencies of all DFS domains from hardware:
+ * System core (and AVP, which equals System - no clock skipping), derived
+ * AHB/APB/V-pipe bus clocks, CPU core, and EMC (reported in 1x units -
+ * half of the EMC 2x clock state).
+ */
+void
+NvRmPrivAp15DfsClockFreqGet(
+    NvRmDeviceHandle hRmDevice,
+    NvRmDfsFrequencies* pDfsKHz)
+{
+    NvRmFreqKHz SystemFreq;
+    const NvRmCoreClockInfo* pCinfo;
+    NV_ASSERT(hRmDevice && pDfsKHz);
+
+    // Get frequencies of the System core clock, AVP clock (the same as System
+    // - no clock skipping), AHB, APB, and V-pipe bus clock frequencies
+    pCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+    SystemFreq = NvRmPrivCoreClockFreqGet(hRmDevice, pCinfo);
+    pDfsKHz->Domains[NvRmDfsClockId_System] = SystemFreq;
+    pDfsKHz->Domains[NvRmDfsClockId_Avp] = SystemFreq;
+
+    NvRmPrivBusClockFreqGet(
+        hRmDevice, SystemFreq,
+        &pDfsKHz->Domains[NvRmDfsClockId_Vpipe],
+        &pDfsKHz->Domains[NvRmDfsClockId_Ahb],
+        &pDfsKHz->Domains[NvRmDfsClockId_Apb]);
+
+    // Get CPU core clock frequencies
+    pCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+    pDfsKHz->Domains[NvRmDfsClockId_Cpu] =
+        NvRmPrivCoreClockFreqGet(hRmDevice, pCinfo);
+
+    // Get EMC clock frequency (DFS monitors EMC 1x domain)
+    Ap15Emc2xFreqGet(hRmDevice); // Get EMC2x clock state from h/w
+    pDfsKHz->Domains[NvRmDfsClockId_Emc] =
+        (s_MemClocks.pEmcState->actual_freq >> 1);
+}
+
+/*
+ * Computes the maximum frequency each DFS domain can run at the given target
+ * core voltage: picks the EMC table entry at or below the voltage (which may
+ * also throttle the CPU ceiling), then binary-searches (1 MHz resolution)
+ * the highest CPU and System frequencies whose selected sources fit within
+ * TargetMv. AVP/AHB/APB/V-pipe are reported equal to the System result.
+ */
+void
+NvRmPrivAp15DfsVscaleFreqGet(
+    NvRmDeviceHandle hRmDevice,
+    NvRmMilliVolts TargetMv,
+    NvRmDfsFrequencies* pDfsKHz)
+{
+    NvU32 i;
+    NvRmMilliVolts v;
+    NvRmFreqKHz Fa, Fb, f;
+    NvRmDfsSource DfsClockSource;
+    NvRmFreqKHz CpuMaxKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+    NvRmFreqKHz SysMaxKHz =
+        NvRmPrivGetSocClockLimits(NvRmPrivModuleID_System)->MaxKHz;
+    NV_ASSERT(hRmDevice && pDfsKHz);
+
+    // If PLLM0 entry in EMC scaling table is valid, search the table for
+    // the entry below and closest to the target voltage. Otherwise, there
+    // is no EMC scaling - just return current EMC frequency.
+    pDfsKHz->Domains[NvRmDfsClockId_Emc] =
+        (s_MemClocks.pEmcState->actual_freq >> 1);
+    f = NvRmFreqMaximum; // assume CPU is not throttled by EMC
+    if (s_Ap15EmcConfigSortedTable[0].Emc2xKHz != 0)
+    {
+        for (i = 0; i < (NVRM_AP15_DFS_EMC_FREQ_STEPS - 1); i++)
+        {
+            if ((s_Ap15EmcConfigSortedTable[i+1].Emc2xKHz == 0) ||
+                (s_Ap15EmcConfigSortedTable[i].CoreVoltageMv <= TargetMv))
+                break; // exit if found entry or next entry is invalid
+        }
+        pDfsKHz->Domains[NvRmDfsClockId_Emc] =
+            (s_Ap15EmcConfigSortedTable[i].Emc2xKHz >> 1);
+        f = s_Ap15EmcConfigSortedTable[i].CpuLimitKHz; // throttle CPU
+    }
+
+    // Binary search for maximum CPU frequency, with source that can be used
+    // at target voltage or below
+    Fb = NV_MIN(CpuMaxKHz, f);
+    Fa = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(Fa <= Fb);
+    while ((Fb - Fa) > 1000) // 1MHz resolution
+    {
+        f = (Fa + Fb) >> 1;
+        Ap15CpuClockSourceFind(hRmDevice, CpuMaxKHz, f, &DfsClockSource);
+        v = DfsClockSource.MinMv;
+        if (v <= TargetMv)
+            Fa = f;
+        else
+            Fb = f;
+    }
+    pDfsKHz->Domains[NvRmDfsClockId_Cpu] = Fa;
+
+    // Binary search for maximum System/Avp frequency, with source that can be used
+    // at target voltage or below
+    Fb = SysMaxKHz;
+    Fa = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(Fa <= Fb);
+    while ((Fb - Fa) > 1000) // 1MHz resolution
+    {
+        f = (Fa + Fb) >> 1;
+        Ap15SystemClockSourceFind(hRmDevice, SysMaxKHz, f, &DfsClockSource);
+        v = DfsClockSource.MinMv;
+        if (v <= TargetMv)
+            Fa = f;
+        else
+            Fb = f;
+    }
+    pDfsKHz->Domains[NvRmDfsClockId_System] = Fa;
+    pDfsKHz->Domains[NvRmDfsClockId_Avp] = Fa;
+    pDfsKHz->Domains[NvRmDfsClockId_Ahb] = Fa;
+    pDfsKHz->Domains[NvRmDfsClockId_Apb] = Fa;
+    pDfsKHz->Domains[NvRmDfsClockId_Vpipe] = Fa;
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_misc.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_misc.c
new file mode 100644
index 000000000000..54ac28bfd1f7
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clock_misc.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_init.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_clocks.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hardware_access.h"
+#include "ap15rm_private.h"
+#include "ap15rm_clocks.h"
+#include "ap16/arapb_misc.h"
+#include "ap15/arahb_arbc.h"
+#include "ap15/armc.h"
+#include "ap15/aremc.h"
+#include "ap15/arfuse.h"
+#include "ap15/arclk_rst.h"
+
+
+// This list requires pre-sorted info in bond-out registers order and bond-out
+// register bit shift order (MSB-to-LSB).
+static const NvU32 s_Ap15BondOutTable[] =
+{
+ // BOND_OUT_L bits
+ NVRM_DEVICE_UNKNOWN, // NV_DEVID_CPU
+ NVRM_DEVICE_UNKNOWN,
+ NVRM_DEVICE_UNKNOWN,
+ NVRM_MODULE_ID( NvRmModuleID_Ac97, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Rtc, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Timer, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Uart, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Uart, 1 ),
+ NVRM_MODULE_ID( NvRmPrivModuleID_Gpio, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Sdio, 1 ),
+ NVRM_MODULE_ID( NvRmModuleID_Spdif, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_I2s, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_I2c, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Nand, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Sdio, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Hsmmc, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Twc, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Pwm, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_I2s, 1 ),
+ NVRM_MODULE_ID( NvRmModuleID_Epp, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Vi, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_2D, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Usb2Otg, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Isp, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_3D, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Ide, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Display, 1 ),
+ NVRM_MODULE_ID( NvRmModuleID_Display, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_GraphicsHost, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Vcp, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_CacheMemCtrl, 0 ),
+ NVRM_DEVICE_UNKNOWN, // NV_DEVID_COP_CACHE
+
+ // BOND_OUT_H bits
+ NVRM_MODULE_ID( NvRmPrivModuleID_MemoryController, 0 ),
+ NVRM_DEVICE_UNKNOWN, // NV_DEVID_AHB_DMA
+ NVRM_MODULE_ID( NvRmPrivModuleID_ApbDma, 0 ),
+ NVRM_DEVICE_UNKNOWN,
+ NVRM_MODULE_ID( NvRmModuleID_Kbc, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_SysStatMonitor, 0 ),
+ NVRM_DEVICE_UNKNOWN, // PMC
+ NVRM_MODULE_ID( NvRmModuleID_Fuse, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Slink, 0 ),
+ NVRM_DEVICE_UNKNOWN, // SBC1
+ NVRM_MODULE_ID( NvRmModuleID_Nor, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Spi, 0 ),
+ NVRM_DEVICE_UNKNOWN, // SBC2
+ NVRM_MODULE_ID( NvRmModuleID_Xio, 0 ),
+ NVRM_DEVICE_UNKNOWN, // SBC3
+ NVRM_MODULE_ID( NvRmModuleID_Dvc, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Dsi, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Tvo, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Mipi, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Hdmi, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Csi, 0 ),
+ NVRM_DEVICE_UNKNOWN, // TVDAC
+ NVRM_MODULE_ID( NvRmModuleID_I2c, 1 ),
+ NVRM_MODULE_ID( NvRmModuleID_Uart, 2 ),
+ NVRM_DEVICE_UNKNOWN, // SPROM
+ NVRM_MODULE_ID( NvRmPrivModuleID_ExternalMemoryController, 0 ),
+ NVRM_DEVICE_UNKNOWN,
+ NVRM_DEVICE_UNKNOWN,
+ NVRM_MODULE_ID( NvRmModuleID_Mpe, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_Vde, 0 ),
+ NVRM_MODULE_ID( NvRmModuleID_BseA, 0 ),
+ NVRM_DEVICE_UNKNOWN, //BSEV
+};
+
+/**
+ * Enable HDCP and Macrovision
+ */
+static void
+NvRmPrivContentProtectionFuses( NvRmDeviceHandle hRm )
+{
+ NvU32 reg;
+ NvU32 clk_rst;
+
+ /* need to set FUSEWRDATA3_RESERVED_PRODUCTION__PRI_ALIAS to 0x3 and
+ * enable the bypass.
+ *
+ * bit 0: macrovision
+ * bit 1: hdcp
+ */
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ Ap15EnableModuleClock(hRm, NvRmModuleID_Fuse, NV_TRUE);
+#endif
+
+ clk_rst = NV_REGR( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0 );
+ clk_rst = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 1, clk_rst );
+ NV_REGW( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, clk_rst );
+
+ reg = NV_DRF_NUM( FUSE, FUSEWRDATA3,
+ FUSEWRDATA_RESERVED_PRODUCTION__PRI_ALIAS_0, 0x3 );
+ NV_REGW( hRm, NvRmModuleID_Fuse, 0, FUSE_FUSEWRDATA3_0, reg );
+
+ reg = NV_DRF_DEF( FUSE, FUSEBYPASS, FUSEBYPASS_VAL, ENABLED );
+ NV_REGW( hRm, NvRmModuleID_Fuse, 0, FUSE_FUSEBYPASS_0, reg );
+
+ clk_rst = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 0, clk_rst );
+ NV_REGW( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, clk_rst );
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ Ap15EnableModuleClock(hRm, NvRmModuleID_Fuse, NV_FALSE);
+#endif
+}
+
+#define NVRM_CONFIG_CLOCK(Module, SrcDef, DivNum) \
+do\
+{\
+ reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0); \
+ if ((DivNum) > NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, reg)) \
+ {\
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, (DivNum), reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY); \
+ }\
+ reg = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_SRC, SrcDef, reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY); \
+ if ((DivNum) < NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, reg))\
+ {\
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, (DivNum), reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);\
+ }\
+} while(0)
+
+#define NVRM_SET_OSC_CLOCK(ClkModule, RstModule, H_L) \
+do\
+{\
+ if (RstOut##H_L & \
+ CLK_RST_CONTROLLER_RST_DEVICES_##H_L##_0_SWR_##RstModule##_RST_FIELD) \
+ {\
+ reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##ClkModule##_0); \
+ reg = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_##ClkModule, \
+ ClkModule##_CLK_SRC, CLK_M, reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##ClkModule##_0, reg); \
+ }\
+} while(0)
+
+/**
+ * brings the minimum modules out of reset.
+ */
+void
+NvRmPrivAp15BasicReset( NvRmDeviceHandle rm )
+{
+#if !NV_OAL
+ NvU32 reg, RstOutL, RstOutH, ClkOutL, ClkOutH;
+ ExecPlatform env;
+
+ if (NvRmIsSimulation())
+ {
+ /* the memory system can't be used until the mem_init_done bit has
+ * been set. This is done by the bootrom for production systems.
+ */
+ reg = NV_REGR( rm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_ARBITRATION_XBAR_CTRL_0 );
+ reg = NV_FLD_SET_DRF_DEF( AHB_ARBITRATION, XBAR_CTRL, MEM_INIT_DONE,
+ DONE, reg );
+ NV_REGW( rm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_ARBITRATION_XBAR_CTRL_0, reg );
+ }
+
+ // FIXME: this takes the Big Hammer Approach. Take everything out
+ // of reset and enable all of the clocks. Then keep enabled only boot
+ // clocks and graphics host.
+
+ // get boot module reset state
+ RstOutL = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RstOutH = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0);
+
+ // save boot clock enable state
+ ClkOutL = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0);
+ ClkOutH = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0);
+
+ /* write clk_out_enb_l */
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0, 0xFFFFFFFF );
+
+ /* write clk_out_enb_h */
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0, 0xFFFFFFFF );
+
+ // For AP15 default clock source selection is out of range for many modules
+ // Just copnfigure clocks so that reset is propagated correctly
+ env = NvRmPrivGetExecPlatform(rm);
+ if (env == ExecPlatform_Soc)
+ {
+ /*
+ * For peripheral modules that are not taken from reset, yet,
+ * use oscillator as a safe clock
+ */
+ NVRM_SET_OSC_CLOCK(I2S1, I2S1, L);
+ NVRM_SET_OSC_CLOCK(I2S2, I2S2, L);
+
+ NVRM_SET_OSC_CLOCK(I2C1, I2C1, L);
+ NVRM_SET_OSC_CLOCK(I2C2, I2C2, H);
+ NVRM_SET_OSC_CLOCK(DVC_I2C, DVC_I2C, H);
+
+ NVRM_SET_OSC_CLOCK(PWM, PWM, L);
+ NVRM_SET_OSC_CLOCK(XIO, XIO, H);
+ NVRM_SET_OSC_CLOCK(TWC, TWC, L);
+ NVRM_SET_OSC_CLOCK(HSMMC, HSMMC, L);
+
+ NVRM_SET_OSC_CLOCK(VFIR, UARTB, L);
+ NVRM_SET_OSC_CLOCK(UARTA, UARTA, L);
+ NVRM_SET_OSC_CLOCK(UARTB, UARTB, L);
+ NVRM_SET_OSC_CLOCK(UARTC, UARTC, H);
+
+ NVRM_SET_OSC_CLOCK(NDFLASH, NDFLASH, L);
+ NVRM_SET_OSC_CLOCK(IDE, IDE, L);
+ NVRM_SET_OSC_CLOCK(MIPI, MIPI, H);
+ NVRM_SET_OSC_CLOCK(SDIO1, SDIO1, L);
+ NVRM_SET_OSC_CLOCK(SDIO2, SDIO2, L);
+
+ NVRM_SET_OSC_CLOCK(SPI1, SPI1, H);
+ NVRM_SET_OSC_CLOCK(SBC1, SBC1, H);
+ NVRM_SET_OSC_CLOCK(SBC2, SBC2, H);
+ NVRM_SET_OSC_CLOCK(SBC3, SBC3, H);
+
+ NVRM_SET_OSC_CLOCK(DISP1, DISP1, L);
+ NVRM_SET_OSC_CLOCK(DISP2, DISP2, L);
+ NVRM_SET_OSC_CLOCK(TVO, TVO, H);
+ NVRM_SET_OSC_CLOCK(CVE, TVO, H);
+ NVRM_SET_OSC_CLOCK(HDMI, HDMI, H);
+ NVRM_SET_OSC_CLOCK(TVDAC, TVDAC, H);
+
+ // Special case SPDIF (set OUT on OSC; IN on PLLP_OUT0/(1+10/2))
+ if (RstOutL & CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD)
+ {
+ reg = NV_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_SPDIF,
+ SPDIFOUT_CLK_SRC, CLK_M) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_SPDIF,
+ SPDIFIN_CLK_SRC, PLLP_OUT0) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_SPDIF,
+ SPDIFIN_CLK_DIVISOR, 10);
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0, reg);
+ }
+
+ /*
+ * For graphic clocks use PLLM_OUT0 (max 400MHz) as a source, and set
+ * divider so that initial frequency is below maximum module limit
+ * (= PLLM_OUT0 / (1 + DIVIDER/2)
+ */
+ #define G_DIVIDER (2)
+ NVRM_CONFIG_CLOCK(HOST1X, PLLM_OUT0, G_DIVIDER);
+ NVRM_CONFIG_CLOCK(EPP, PLLM_OUT0, G_DIVIDER);
+ NVRM_CONFIG_CLOCK(G2D, PLLM_OUT0, G_DIVIDER);
+ NVRM_CONFIG_CLOCK(G3D, PLLM_OUT0, G_DIVIDER);
+ NVRM_CONFIG_CLOCK(MPE, PLLM_OUT0, G_DIVIDER);
+ #define VI_DIVIDER (4)
+ NVRM_CONFIG_CLOCK(VI, PLLM_OUT0, VI_DIVIDER);
+ NVRM_CONFIG_CLOCK(VI_SENSOR, PLLM_OUT0, VI_DIVIDER);
+
+ NvOsWaitUS(NVRM_RESET_DELAY);
+ }
+ // Make sure Host1x clock will be kept enabled
+ ClkOutL = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_L,
+ CLK_ENB_HOST1X, ENABLE, ClkOutL);
+ // Make sure VDE, BSEV and BSEA clocks will be kept disabled
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_VDE, DISABLE, ClkOutH);
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_BSEV, DISABLE, ClkOutH);
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_BSEA, DISABLE, ClkOutH);
+
+ /* write rst_devices_l */
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, 0 );
+
+ /* write rst_devies_h */
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0, 0 );
+
+ // restore clock enable state (= disable those clocks that
+ // were disabled on boot)
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0, ClkOutL );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0, ClkOutH );
+
+ /* enable hdcp and macrovision */
+ NvRmPrivContentProtectionFuses( rm );
+
+ // 10jun2008: jn turning this back on. Still need to solve the QT
+ // issue.
+ // FIXME: On Quickturn and FPGA we are using normal sdram, not mobile
+ // ram. Need some way to determine if we have normal sdram
+ // or mobile sdram. Actually these bits should be set by BCT.
+ reg = NV_REGR(rm, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_0);
+ reg = NV_FLD_SET_DRF_DEF(EMC, CFG, DRAM_CLKSTOP, ENABLED, reg);
+ reg = NV_FLD_SET_DRF_DEF(EMC, CFG, DRAM_ACPD, ACTIVE_POWERDOWN, reg);
+ NV_REGW( rm, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_0, reg);
+
+ // Enable stop clock to CPU, while it is halted
+ reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_MASK_ARM_0);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_MASK_ARM,
+ CLK_MASK_CPU_HALT, 1, reg );
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_MASK_ARM_0, reg);
+
+ if (rm->ChipId.Id == 0x16)
+ {
+ NvU32 Reg = 0;
+ // If USB main clock source is not enabled then disable the clocks to USB0 and USB1
+ Reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0);
+ if (!NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_OUT_ENB_L, CLK_ENB_USBD, Reg))
+ {
+ // Disable clocks for USB1 and USB2 controllers.Should be enabled on need basis.
+ Reg = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ Reg = NV_FLD_SET_DRF_DEF(APB_MISC_PP,MISC_USB_CLK_RST_CTL, MISC_USB_CE, DISABLE, Reg);
+ Reg = NV_FLD_SET_DRF_DEF(APB_MISC_PP,MISC_USB_CLK_RST_CTL, MISC_USB2_CE, DISABLE, Reg);
+ NV_REGW(rm, NvRmModuleID_Misc, 0, APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, Reg);
+ }
+ }
+
+#endif // !NV_OAL
+}
+
+static void
+NvRmPrivAp15GetBondOut( NvRmDeviceHandle hDevice,
+ const NvU32 **pTable,
+ NvU32 *bondOut )
+{
+ *pTable = s_Ap15BondOutTable;
+ bondOut[0] = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_BOND_OUT_L_0);
+ bondOut[1] = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_BOND_OUT_H_0);
+}
+
+
+#define NVRM_MAX_BOND_OUT_REG 3
+
+/*
+ * Check BondOut register to determine which module and/or module instance
+ * is not available.
+ */
+void
+NvRmPrivCheckBondOut( NvRmDeviceHandle hDevice )
+{
+ NvRmModuleTable *mod_table = 0;
+ NvRmModule *modules = 0;
+ NvRmModuleInstance *instance = 0;
+ NvRmChipId *id = 0;
+ NvU32 bondOut[NVRM_MAX_BOND_OUT_REG] = {0, 0, 0};
+ NvU32 j, i, k;
+ const NvU32 *table = NULL;
+ NvU8 *pb = NULL;
+ NvU8 val;
+
+ NV_ASSERT( hDevice );
+
+ id = NvRmPrivGetChipId( hDevice );
+ switch (id->Id)
+ {
+ case 0x15:
+ case 0x16:
+ NvRmPrivAp15GetBondOut(hDevice, &table, bondOut);
+ break;
+ case 0x20:
+ NvRmPrivAp20GetBondOut(hDevice, &table, bondOut);
+ break;
+ default:
+ return; // no support
+ }
+
+ if ( !bondOut[0] && !bondOut[1] && !bondOut[2] )
+ return;
+
+ mod_table = NvRmPrivGetModuleTable( hDevice );
+ modules = mod_table->Modules;
+
+ for ( i = 0, j = 0; j < NVRM_MAX_BOND_OUT_REG; j++ )
+ {
+ if ( !bondOut[j] )
+ {
+ i += 32; // skip full 32-bit
+ continue;
+ }
+ pb = (NvU8 *)&bondOut[j];
+ for ( k = 0; k < 4; k++ )
+ {
+ val = *pb++;
+ if ( !val )
+ {
+ i += 8;
+ continue;
+ }
+ for( ; ; )
+ {
+ if ( val & 1 )
+ {
+ NvU32 moduleIdInst = table[i];
+ if ( NVRM_DEVICE_UNKNOWN != moduleIdInst )
+ {
+ if ( NvSuccess == NvRmPrivGetModuleInstance(hDevice,
+ moduleIdInst, &instance) )
+ {
+ /* Mark instance's DevIdx to invalid value -1. if all
+ instances for the module are invalid, mark the module
+ itself INVALID.
+ Keep instance->DeviceId to maintain instance ordering
+ since we could be bonding out, say, UARTA but UARTB and
+ UARTC still available. */
+ NvRmModuleID moduleId =
+ NVRM_MODULE_ID_MODULE( moduleIdInst );
+ instance->DevIdx = (NvU8)-1;
+ if (0 == NvRmModuleGetNumInstances( hDevice, moduleId ))
+ modules[moduleId].Index = NVRM_MODULE_INVALID;
+ }
+ }
+ }
+ val = val >> 1; // Use ARM's clz?
+ if ( !val )
+ {
+ i = (i + 7) & ~7; // skip till next byte
+ break;
+ }
+ i++;
+ }
+ }
+ }
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.c
new file mode 100644
index 000000000000..8c428b93f85a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.c
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvrm_clocks.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_module.h"
+#include "nvrm_drf.h"
+#include "ap15/aremc.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/arapbpm.h"
+#include "ap16/arapb_misc.h"
+#include "ap15rm_clocks.h"
+#include "ap15rm_private.h"
+
+
+
+/*****************************************************************************/
+
+static void NvRmPrivWaitUS(
+ NvRmDeviceHandle hDevice,
+ NvU32 usec)
+{
+ NvU32 t, start;
+
+ start = NV_REGR(hDevice, NvRmModuleID_TimerUs, 0, 0);
+ for (;;)
+ {
+ t = NV_REGR(hDevice, NvRmModuleID_TimerUs, 0, 0);
+ if ( ((NvU32)(t - start)) >= usec )
+ break;
+ }
+}
+
+#define CLOCK_ENABLE( rm, offset, field, EnableState ) \
+ do { \
+ regaddr = (CLK_RST_CONTROLLER_##offset##_0); \
+ NvOsMutexLock((rm)->CarMutex); \
+ reg = NV_REGR((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr); \
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, offset, field, EnableState, reg); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr, reg); \
+ NvOsMutexUnlock((rm)->CarMutex); \
+ } while( 0 )
+
+/*****************************************************************************/
+void
+Ap15EnableModuleClock(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState)
+{
+ // Extract module and instance from composite module id.
+ NvU32 Module = NVRM_MODULE_ID_MODULE( ModuleId );
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+ NvU32 reg;
+ NvU32 regaddr;
+
+ if (ClockState == ModuleClockState_Enable)
+ {
+ NvRmPrivConfigureClockSource(hDevice, ModuleId, NV_TRUE);
+ }
+
+ switch ( Module ) {
+ case NvRmModuleID_CacheMemCtrl:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_CACHE1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_CACHE2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Vcp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_VCP, ClockState );
+ break;
+ case NvRmModuleID_GraphicsHost:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_HOST1X, ClockState );
+ break;
+ case NvRmModuleID_Display:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_DISP1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_DISP2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Ide:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_IDE, ClockState );
+ break;
+ case NvRmModuleID_3D:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_3D, ClockState );
+ break;
+ case NvRmModuleID_Isp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_ISP, ClockState );
+ break;
+ case NvRmModuleID_Usb2Otg:
+ NV_ASSERT( Instance < 2 );
+ if ((hDevice->ChipId.Id == 0x16) && (ClockState == NV_FALSE))
+ {
+ NvU32 RegVal = 0;
+ // On AP16 USB clock source is shared for both USB controllers
+ // Disabling the main clock source will disable both controllers
+ // when disabling the clock make sure that both controllers are disabled.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+
+ if (!(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB_CE, RegVal)) &&
+ !(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB2_CE, RegVal)) )
+ {
+ /// Disable USBD clock for both the instances 0 and 1
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_USBD, ClockState );
+ }
+ }
+ else
+ {
+ /// Enable/Disable USBD clock
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_USBD, ClockState );
+ }
+ break;
+ case NvRmModuleID_2D:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_2D, ClockState );
+ break;
+ case NvRmModuleID_Epp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_EPP, ClockState );
+ break;
+ case NvRmModuleID_Vi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_VI, ClockState );
+ break;
+ case NvRmModuleID_I2s:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_I2S1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_I2S2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Hsmmc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_HSMMC, ClockState );
+ break;
+ case NvRmModuleID_Twc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_TWC, ClockState );
+ break;
+ case NvRmModuleID_Pwm:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_PWM, ClockState );
+ break;
+ case NvRmModuleID_Sdio:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_SDIO1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_SDIO2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Spdif:
+ NV_ASSERT( Instance < 1 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_SPDIF, ClockState );
+ break;
+ case NvRmModuleID_Nand:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_NDFLASH, ClockState );
+ break;
+ case NvRmModuleID_I2c:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_I2C1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_I2C2, ClockState );
+ }
+ break;
+ case NvRmPrivModuleID_Gpio:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_GPIO, ClockState );
+ break;
+ case NvRmModuleID_Uart:
+ NV_ASSERT( Instance < 3 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_UARTA, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_UARTB, ClockState );
+ }
+ else if ( Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_UARTC, ClockState );
+ }
+ break;
+ case NvRmModuleID_Vfir:
+ // Same as UARTB
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_UARTB, ClockState );
+ break;
+ case NvRmModuleID_Ac97:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_AC97, ClockState );
+ break;
+ case NvRmModuleID_Rtc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_RTC, ClockState );
+ break;
+ case NvRmModuleID_Timer:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_TMR, ClockState );
+ break;
+ case NvRmModuleID_BseA:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_BSEA, ClockState );
+ break;
+ case NvRmModuleID_Vde:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_VDE, ClockState );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_BSEV, ClockState );
+ break;
+ case NvRmModuleID_Mpe:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_MPE, ClockState );
+ break;
+ case NvRmModuleID_Tvo:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_TVO, ClockState );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_TVDAC, ClockState );
+ break;
+ case NvRmModuleID_Csi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_CSI, ClockState );
+ break;
+ case NvRmModuleID_Hdmi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_HDMI, ClockState );
+ break;
+ case NvRmModuleID_Mipi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_MIPI, ClockState );
+ break;
+ case NvRmModuleID_Dsi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_DSI, ClockState );
+ break;
+ case NvRmModuleID_Xio:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_XIO, ClockState );
+ break;
+ case NvRmModuleID_Spi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_SPI1, ClockState );
+ break;
+ case NvRmModuleID_Fuse:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_FUSE, ClockState );
+ break;
+ case NvRmModuleID_Slink:
+ // Supporting only the slink controller.
+ NV_ASSERT( Instance < 3 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_SBC1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_SBC2, ClockState );
+ }
+ else if ( Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_SBC3, ClockState );
+ }
+ break;
+ case NvRmModuleID_Dvc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_DVC_I2C, ClockState );
+ break;
+ case NvRmModuleID_Pmif:
+ NV_ASSERT( Instance == 0 );
+ // PMC clock must not be disabled
+ if (ClockState == ModuleClockState_Enable)
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_PMC, ClockState );
+ break;
+ case NvRmModuleID_SysStatMonitor:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_STAT_MON, ClockState );
+ break;
+ case NvRmModuleID_Kbc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_KBC, ClockState );
+ break;
+ case NvRmPrivModuleID_ApbDma:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_APBDMA, ClockState );
+ break;
+ case NvRmPrivModuleID_MemoryController:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_MEM, ClockState );
+ break;
+ case NvRmPrivModuleID_ExternalMemoryController:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_EMC, ClockState );
+ CLOCK_ENABLE( hDevice, CLK_SOURCE_EMC, EMC_2X_CLK_ENB, ClockState );
+ CLOCK_ENABLE( hDevice, CLK_SOURCE_EMC, EMC_1X_CLK_ENB, ClockState );
+ break;
+ case NvRmModuleID_Cpu:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_L, CLK_ENB_CPU, ClockState );
+ break;
+ default:
+ NV_ASSERT(!" Unknown NvRmModuleID passed to Ap15EnableModuleClock(). ");
+ }
+
+ if (ClockState == ModuleClockState_Disable)
+ {
+ NvRmPrivConfigureClockSource(hDevice, ModuleId, NV_FALSE);
+ }
+}
+
+void
+Ap15EnableTvDacClock(
+ NvRmDeviceHandle hDevice,
+ ModuleClockState ClockState)
+{
+ NvU32 reg;
+ NvU32 regaddr;
+
+ CLOCK_ENABLE( hDevice, CLK_OUT_ENB_H, CLK_ENB_TVDAC, ClockState );
+}
+
+/*****************************************************************************/
+
+ // Note that VDE has different reset sequence requirement
+ // FIMXE: NV blocks - hot reset issues
+ #define RESET( rm, offset, field, delay ) \
+ do { \
+ regaddr = (CLK_RST_CONTROLLER_##offset##_0); \
+ NvOsMutexLock((rm)->CarMutex); \
+ reg = NV_REGR((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr); \
+ reg = NV_FLD_SET_DRF_NUM( \
+ CLK_RST_CONTROLLER, offset, field, 1, reg); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr, reg); \
+ if (Hold) \
+ {\
+ NvOsMutexUnlock((rm)->CarMutex); \
+ break; \
+ }\
+ NvRmPrivWaitUS( (rm), (delay) ); \
+ reg = NV_FLD_SET_DRF_NUM( \
+ CLK_RST_CONTROLLER, offset, field, 0, reg); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr, reg); \
+ NvOsMutexUnlock((rm)->CarMutex); \
+ } while( 0 )
+
+// KBC reset is available in the pmc control register.
+#define RESET_KBC( rm, delay ) \
+ do { \
+ regaddr = (APBDEV_PMC_CNTRL_0); \
+ NvOsMutexLock((rm)->CarMutex); \
+ reg = NV_REGR((rm), NvRmModuleID_Pmif, 0, regaddr); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, KBC_RST, ENABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, regaddr, reg); \
+ if (Hold) \
+ {\
+ NvOsMutexUnlock((rm)->CarMutex); \
+ break; \
+ }\
+ NvRmPrivWaitUS( (rm), (delay) ); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, KBC_RST, DISABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, regaddr, reg); \
+ NvOsMutexUnlock((rm)->CarMutex); \
+ } while( 0 )
+
+
+// Use PMC control to reset the entire SoC. Just wait forever after reset is
+// issued - h/w would auto-clear it and restart SoC
+#define RESET_SOC( rm ) \
+ do { \
+ regaddr = (APBDEV_PMC_CNTRL_0); \
+ reg = NV_REGR((rm), NvRmModuleID_Pmif, 0, regaddr); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, MAIN_RST, ENABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, regaddr, reg); \
+ for (;;) ; \
+ } while( 0 )
+
+
+void AP15ModuleReset(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvBool Hold)
+{
+ // Extract module and instance from composite module id.
+ NvU32 Module = NVRM_MODULE_ID_MODULE( ModuleId );
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+ NvU32 reg;
+ NvU32 regaddr;
+
+ switch( Module ) {
+ case NvRmPrivModuleID_MemoryController:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_MEM_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Kbc:
+ NV_ASSERT( Instance == 0 );
+ RESET_KBC(hDevice, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_SysStatMonitor:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_STAT_MON_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Pmif:
+ NV_ASSERT( Instance == 0 );
+ NV_ASSERT(!"PMC reset is not allowed, and does nothing on AP15");
+ // RESET( hDevice, RST_DEVICES_H, SWR_PMC_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Fuse:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_FUSE_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Slink:
+ // Supporting only the slink controller.
+ NV_ASSERT( Instance < 3 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_H, SWR_SBC1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_H, SWR_SBC2_RST, NVRM_RESET_DELAY );
+ }
+ else if ( Instance == 2)
+ {
+ RESET( hDevice, RST_DEVICES_H, SWR_SBC3_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Spi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_SPI1_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Xio:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_XIO_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Dvc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_DVC_I2C_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Dsi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_DSI_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Tvo:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_TVO_RST, NVRM_RESET_DELAY );
+ RESET( hDevice, RST_DEVICES_H, SWR_TVDAC_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Mipi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_MIPI_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Hdmi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_HDMI_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Csi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_CSI_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_I2c:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_I2C1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_H, SWR_I2C2_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Mpe:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_MPE_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Vde:
+ NV_ASSERT( Instance == 0 );
+ {
+ NvU32 reg;
+ NvOsMutexLock(hDevice->CarMutex);
+ reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, RST_DEVICES_H,
+ SWR_VDE_RST, 1, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, RST_DEVICES_H,
+ SWR_BSEV_RST, 1, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0, reg);
+ if (Hold)
+ {
+ NvOsMutexUnlock(hDevice->CarMutex);
+ break;
+ }
+ NvRmPrivWaitUS( hDevice, NVRM_RESET_DELAY );
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, RST_DEVICES_H,
+ SWR_BSEV_RST, 0, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, RST_DEVICES_H,
+ SWR_VDE_RST, 0, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0, reg);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ }
+ break;
+ case NvRmModuleID_BseA:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_BSEA_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Cpu:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_CPU_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Avp:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_COP_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_System:
+ // THIS WILL DO A FULL SYSTEM RESET
+ NV_ASSERT( Instance == 0 );
+ RESET_SOC(hDevice);
+ break;
+ case NvRmModuleID_Ac97:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_AC97_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Rtc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_RTC_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Timer:
+ NV_ASSERT( Instance == 0 );
+ // Timer reset (which also affects microsecond timer) is not allowed
+ // RESET( hDevice, RST_DEVICES_L, SWR_TMR_RST, NVRM_RESET_DELAY );
+ NV_ASSERT(!"Timer reset is not allowed");
+ break;
+ case NvRmModuleID_Uart:
+ NV_ASSERT( Instance < 3 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_UARTA_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_UARTB_RST, NVRM_RESET_DELAY );
+ }
+ else if ( Instance == 2)
+ {
+ RESET( hDevice, RST_DEVICES_H, SWR_UARTC_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Vfir:
+ // Same as UARTB
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_UARTB_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Sdio:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_SDIO1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_SDIO2_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Spdif:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_SPDIF_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_I2s:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_I2S1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_I2S2_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Nand:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_NDFLASH_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Hsmmc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_HSMMC_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Twc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_TWC_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Pwm:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_PWM_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Epp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_EPP_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Vi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_VI_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_3D:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_3D_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_2D:
+ NV_ASSERT( Instance == 0 );
+ // RESET( hDevice, RST_DEVICES_L, SWR_2D_RST, NVRM_RESET_DELAY );
+ // WAR for bug 364497, see also NvRmPrivAp15Reset2D()
+ NV_ASSERT(!"2D reset after RM open is no longer allowed");
+ break;
+ case NvRmModuleID_Usb2Otg:
+ {
+#if !NV_OAL
+ NvU32 RegVal = 0;
+ NV_ASSERT( Instance < 2 );
+ if (hDevice->ChipId.Id == 0x16)
+ {
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ if (!(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB_CE, RegVal)) &&
+ !(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB2_CE, RegVal)) )
+ {
+ /// Reset USBD if USB1/USB2 is not enabled already
+ RESET( hDevice, RST_DEVICES_L, SWR_USBD_RST, NVRM_RESET_DELAY );
+ }
+ }
+ else
+ {
+ /// Reset USBD
+ RESET( hDevice, RST_DEVICES_L, SWR_USBD_RST, NVRM_RESET_DELAY );
+ }
+#else
+ /// Reset USBD
+ RESET( hDevice, RST_DEVICES_L, SWR_USBD_RST, NVRM_RESET_DELAY );
+#endif
+ }
+ break;
+ case NvRmModuleID_Isp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_ISP_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Ide:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_IDE_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Display:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_DISP1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_DISP2_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Vcp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_VCP_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_CacheMemCtrl:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_CACHE1_RST, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, RST_DEVICES_L, SWR_CACHE2_RST, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmPrivModuleID_ApbDma:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_H, SWR_APBDMA_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_Gpio:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_GPIO_RST, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_GraphicsHost:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, RST_DEVICES_L, SWR_HOST1X_RST, NVRM_RESET_DELAY );
+ break;
+ default:
+ NV_ASSERT(!"Invalid ModuleId");
+ }
+
+ #undef RESET
+}
+
+/*****************************************************************************/
+
+void
+NvRmPrivAp15Reset2D(NvRmDeviceHandle hRmDevice)
+{
+#if !NV_OAL
+ NvU32 reg, offset;
+ /*
+ * WAR for bug 364497: 2D can not be taken out of reset if VI clock is
+ * running. Therefore, make sure VI clock is disabled and reset 2D here
+ * during RM initialization.
+ */
+ Ap15EnableModuleClock(hRmDevice, NvRmModuleID_Vi, ModuleClockState_Disable);
+
+ // Assert reset to 2D module
+ offset = CLK_RST_CONTROLLER_RST_DEVICES_L_0;
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset);
+ reg = NV_FLD_SET_DRF_DEF(
+ CLK_RST_CONTROLLER, RST_DEVICES_L, SWR_2D_RST, ENABLE, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+
+ // Enable "known good" configuration for 2D clock (PLLM divided by 2)
+ offset = CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0;
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset,
+ (NV_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_G2D, G2D_CLK_DIVISOR, 2) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_G2D, G2D_CLK_SRC, PLLM_OUT0)));
+ Ap15EnableModuleClock(hRmDevice, NvRmModuleID_2D, ModuleClockState_Enable);
+ NvOsWaitUS(NVRM_RESET_DELAY);
+
+ // Take 2D out of reset and disable 2D clock. Both VI and 2D clocks are
+ // left disabled - it is up to the respective drivers to configure and enable
+ // them later.
+ offset = CLK_RST_CONTROLLER_RST_DEVICES_L_0;
+ reg = NV_FLD_SET_DRF_DEF(
+ CLK_RST_CONTROLLER, RST_DEVICES_L, SWR_2D_RST, DISABLE, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+ Ap15EnableModuleClock(hRmDevice, NvRmModuleID_2D, ModuleClockState_Disable);
+#endif
+}
+
+void
+NvRmPrivAp15ClockConfigEx(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID Module,
+ NvU32 ClkSourceOffset,
+ NvU32 flags)
+{
+ NvU32 reg;
+
+ if ((Module == NvRmModuleID_Vi) &&
+ (!(flags & NvRmClockConfig_SubConfig)) &&
+ (flags & (NvRmClockConfig_InternalClockForPads |
+ NvRmClockConfig_ExternalClockForPads |
+ NvRmClockConfig_InternalClockForCore |
+ NvRmClockConfig_ExternalClockForCore)))
+ {
+#ifdef CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_PD2VI_CLK_SEL_FIELD
+ reg = NV_REGR(
+ hDevice, NvRmPrivModuleID_ClockAndReset, 0, ClkSourceOffset);
+
+ /* Default is pads use External and Core use internal */
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_VI, PD2VI_CLK_SEL, 0, reg);
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_VI, VI_CLK_SEL, 0, reg);
+
+ /* This is an invalid setting. */
+ NV_ASSERT(!((flags & NvRmClockConfig_InternalClockForPads) &&
+ (flags & NvRmClockConfig_ExternalClockForCore)));
+
+ if (flags & NvRmClockConfig_InternalClockForPads)
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_VI, PD2VI_CLK_SEL, 1, reg);
+ if (flags & NvRmClockConfig_ExternalClockForCore)
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_VI, VI_CLK_SEL, 1, reg);
+
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ ClkSourceOffset, reg);
+#endif
+ }
+ if (Module == NvRmModuleID_I2s)
+ {
+ reg = NV_REGR(
+ hDevice, NvRmPrivModuleID_ClockAndReset, 0, ClkSourceOffset);
+
+ if (flags & NvRmClockConfig_ExternalClockForCore)
+ {
+ // Set I2S in slave mode (field definition is the same for I2S1 and I2S2)
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_I2S1, I2S1_MASTER_CLKEN, 0, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ ClkSourceOffset, reg);
+ }
+ else if (flags & NvRmClockConfig_InternalClockForCore)
+ {
+ // Set I2S in master mode (field definition is the same for I2S1 and I2S2)
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_SOURCE_I2S1, I2S1_MASTER_CLKEN, 1, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ ClkSourceOffset, reg);
+ }
+ }
+}
+
+void NvRmPrivAp15SimPllInit(NvRmDeviceHandle hRmDevice)
+{
+ NvU32 RegData;
+
+ //Enable the plls in simulation. We can just use PLLC as the template
+ //and replicate across pllM and pllP since the offsets are the same.
+ RegData = NV_DRF_NUM (CLK_RST_CONTROLLER, PLLC_BASE, PLLC_DIVP, 0)
+ | NV_DRF_NUM (CLK_RST_CONTROLLER, PLLC_BASE, PLLC_DIVM, 0)
+ | NV_DRF_NUM (CLK_RST_CONTROLLER, PLLC_BASE, PLLC_DIVN, 0)
+ | NV_DRF_DEF (CLK_RST_CONTROLLER, PLLC_BASE, PLLC_BYPASS, DISABLE)
+ | NV_DRF_DEF (CLK_RST_CONTROLLER, PLLC_BASE, PLLC_ENABLE, ENABLE) ;
+
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PLLM_BASE_0, RegData);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PLLC_BASE_0, RegData);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PLLP_BASE_0, RegData);
+}
+
+NvError
+NvRmPrivAp15OscDoublerConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz OscKHz)
+{
+ NvU32 reg, Taps;
+ NvError error = NvRmPrivGetOscDoublerTaps(hRmDevice, OscKHz, &Taps);
+
+ if (error == NvSuccess)
+ {
+ // Program delay
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PROG_DLY_CLK_0);
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, PROG_DLY_CLK, CLK_D_DELCLK_SEL, Taps, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PROG_DLY_CLK_0, reg);
+ // Enable doubler
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, MISC_CLK_ENB, CLK_M_DOUBLER_ENB, 1, reg);
+ }
+ else
+ {
+ // Disable doubler
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, MISC_CLK_ENB, CLK_M_DOUBLER_ENB, 0, reg);
+ }
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, reg);
+ return error;
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.h b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.h
new file mode 100644
index 000000000000..92f4650ae19c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks.h
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_AP15RM_CLOCKS_H
+#define INCLUDED_AP15RM_CLOCKS_H
+
+#include "nvrm_clocks.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+extern const NvRmModuleClockInfo g_Ap15ModuleClockTable[];
+extern const NvU32 g_Ap15ModuleClockTableSize;
+
+// PLLM ratios for graphic clocks
+#define NVRM_PLLM_HOST_SPEED_RATIO (4)
+#define NVRM_PLLM_2D_LOW_SPEED_RATIO (3)
+#define NVRM_PLLM_2D_HIGH_SPEED_RATIO (2)
+
+/**
+ * Defines frequency steps derived from PLLP0 fixed output to be used as System
+ * clock source frequency. The frequency specified in kHz, and it will be rounded
+ * up to the closest divider output.
+ */
+#define NVRM_AP15_PLLP_POLICY_SYSTEM_CLOCK \
+ PLLP_POLICY_ENTRY(54000) /* PLLP divider 6, output frequency 54,000kHz */ \
+ PLLP_POLICY_ENTRY(72000) /* PLLP divider 4, output frequency 72,000kHz */ \
+ PLLP_POLICY_ENTRY(108000) /* PLLP divider 2, output frequency 108,000kHz */ \
+ PLLP_POLICY_ENTRY(144000) /* PLLP divider 1, output frequency 144,000kHz */ \
+ PLLP_POLICY_ENTRY(216000) /* PLLP divider 0, output frequency 216,000kHz */
+
+/**
+ * Defines frequency steps derived from PLLP0 fixed output to be used as CPU
+ * clock source frequency. The frequency specified in kHz, and it will be rounded
+ * up to the closest divider output.
+ */
+#define NVRM_AP15_PLLP_POLICY_CPU_CLOCK \
+ PLLP_POLICY_ENTRY(24000) /* PLLP divider 16, output frequency 24,000kHz */ \
+ PLLP_POLICY_ENTRY(54000) /* PLLP divider 6, output frequency 54,000kHz */ \
+ PLLP_POLICY_ENTRY(108000) /* PLLP divider 2, output frequency 108,000kHz */ \
+ PLLP_POLICY_ENTRY(216000) /* PLLP divider 0, output frequency 216,000kHz */ \
+
+/**
+ * Combines EMC 2x frequency and the respective set of EMC timing parameters for
+ * pre-defined EMC configurations (DDR clock is running at EMC 1x frequency)
+ */
+typedef struct NvRmAp15EmcTimingConfigRec
+{
+ NvRmFreqKHz Emc2xKHz;
+ NvU32 Timing0Reg;
+ NvU32 Timing1Reg;
+ NvU32 Timing2Reg;
+ NvU32 Timing3Reg;
+ NvU32 Timing4Reg;
+ NvU32 Timing5Reg;
+ NvU32 FbioCfg6Reg;
+ NvU32 FbioDqsibDly;
+ NvU32 FbioQuseDly;
+ NvU32 Emc2xDivisor;
+ NvRmFreqKHz McKHz;
+ NvU32 McDivisor;
+ NvU32 McClockSource;
+ NvRmFreqKHz CpuLimitKHz;
+ NvRmMilliVolts CoreVoltageMv;
+} NvRmAp15EmcTimingConfig;
+
+// Defines number of EMC frequency steps for DFS
+#define NVRM_AP15_DFS_EMC_FREQ_STEPS (5)
+
+// Defines CPU and EMC ratio policy as
+// CpuKHz/CpuMax <= PolicyTable[PLLM0/(2*EMC2xKHz)] / 256
+#define NVRM_AP15_CPU_EMC_RATIO_POLICY \
+ 256, 192, 144, 122, 108, 98, 91, 86, 81, 77
+
+/*****************************************************************************/
+
+/**
+ * Enables/disables module clock.
+ *
+ * @param hDevice The RM device handle.
+ * @param ModuleId Combined module ID and instance of the target module.
+ * @param ClockState Target clock state.
+ */
+void
+Ap15EnableModuleClock(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState);
+
+// Separate API to control TVDAC clock independently of TVO
+// (when TVDAC is used for CRT)
+void
+Ap15EnableTvDacClock(
+ NvRmDeviceHandle hDevice,
+ ModuleClockState ClockState);
+
+/**
+ * Resets module (assert/delay/deassert reset signal) if the hold parameter is
+ * NV_FALSE. If the hold parameter is NV_TRUE, just assert the reset and return.
+ *
+ * @param hDevice The RM device handle.
+ * @param Module Combined module ID and instance of the target module.
+ * @param hold To hold or release the reset.
+ */
+void AP15ModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold);
+
+/*****************************************************************************/
+
+/**
+ * Initializes PLL references table.
+ *
+ * @param pPllReferencesTable A pointer to a pointer which this function sets
+ * to the PLL reference table base.
+ * @param pPllReferencesTableSize A pointer to a variable which this function
+ * sets to the PLL reference table size.
+ */
+void
+NvRmPrivAp15PllReferenceTableInit(
+ NvRmPllReference** pPllReferencesTable,
+ NvU32* pPllReferencesTableSize);
+
+/**
+ * Initializes EMC clocks configuration structures and tables.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp15EmcConfigInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Resets 2D module.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp15Reset2D(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Initializes clock source table.
+ *
+ * @return Pointer to the clock sources descriptor table.
+ */
+NvRmClockSourceInfo* NvRmPrivAp15ClockSourceTableInit(void);
+
+/**
+ * Sets "as is" specified PLL configuration: switches PLL in bypass mode,
+ * changes PLL settings, waits for PLL stabilization, and switches to PLL
+ * output.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the PLL description structure.
+ * @param M PLL input divider setting.
+ * @param N PLL feedback divider setting.
+ * @param P PLL output divider setting.
+ * PLL is left disabled (not bypassed) if either M or N setting is zero:
+ * M = 0 or N = 0; otherwise, M, N, P validation is caller responsibility.
+ * @param StableDelayUs PLL stabilization delay in microseconds. If specified
+ * value is above guaranteed stabilization time, the latter one is used.
+ * @param cpcon PLL charge pump control setting; ignored if TypicalControls
+ * is true.
+ * @param lfcon PLL loop filter control setting; ignored if TypicalControls
+ * is true.
+ * @param TypicalControls If true, both charge pump and loop filter parameters
+ * are ignored and typical controls that corresponds to specified M, N, P
+ * values will be set. If false, the cpcon and lfcon parameters are set; in
+ * this case parameter validation is caller responsibility.
+ * @param flags PLL specific flags. These flags are valid only for some PLLs,
+ * see @NvRmPllConfigFlags.
+ */
+void
+NvRmPrivAp15PllSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo,
+ NvU32 M,
+ NvU32 N,
+ NvU32 P,
+ NvU32 StableDelayUs,
+ NvU32 cpcon,
+ NvU32 lfcon,
+ NvBool TypicalControls,
+ NvU32 flags);
+
+/**
+ * Configures output frequency for specified PLL.
+ *
+ * @param PllId Targeted PLL ID.
+ * @param MaxOutKHz Upper limit for PLL output frequency.
+ * @param pPllOutKHz A pointer to the requested PLL frequency on entry,
+ * and to the actually configured frequency on exit.
+ */
+void
+NvRmPrivAp15PllConfigureSimple(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource PllId,
+ NvRmFreqKHz MaxOutKHz,
+ NvRmFreqKHz* pPllOutKHz);
+
+/**
+ * Gets PLL output frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the PLL description structure.
+ *
+ * @return PLL output frequency in kHz (reference frequency if PLL
+ * is by-passed; zero if PLL is disabled and not by-passed).
+ */
+NvRmFreqKHz
+NvRmPrivAp15PllFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo);
+
+/**
+ * Gets frequencies of DFS controlled clocks
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pDfsKHz Output storage pointer for DFS clock frequencies structure
+ * (all frequencies returned in kHz).
+ */
+void
+NvRmPrivAp15DfsClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/**
+ * Configures DFS controlled clocks
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pMaxKHz Pointer to the DFS clock frequencies upper limits
+ * @param pDfsKHz Pointer to the target DFS frequencies structure on entry;
+ * updated with actual DFS clock frequencies on exit.
+ *
+ * @return NV_TRUE if clock configuration is completed; NV_FALSE if this
+ * function has to be called again to complete configuration.
+ */
+NvBool
+NvRmPrivAp15DfsClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsFrequencies* pMaxKHz,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/**
+ * Gets maximum DFS domains frequencies that can be used at specified
+ * core voltage.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param TargetMv Targeted core voltage in mV.
+ * @param pDfsKHz Pointer to a structure filled in by this function with
+ * output clock frequencies.
+ */
+void
+NvRmPrivAp15DfsVscaleFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmMilliVolts TargetMv,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/**
+ * Determines if module clock configuration requires AP15-specific handling,
+ * and configures the clock if yes.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the module clock descriptor.
+ * @param ClockSourceCount Number of module clock sources.
+ * @param MinFreq Requested minimum module clock frequency.
+ * @param MaxFreq Requested maximum module clock frequency.
+ * @param PrefFreqList Pointer to a list of preferred frequencies sorted
+ * in the decreasing order of priority.
+ * @param PrefCount Number of entries in the PrefFreqList array.
+ * @param pCstate Pointer to module state structure filled in if special
+ * handling is completed.
+ * @param flags Module specific flags
+ *
+ * @return True indicates that module clock is configured, and regular
+ * configuration should be aborted; False indicates that regular clock
+ * configuration should proceed.
+ */
+NvBool
+NvRmPrivAp15IsModuleClockException(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleClockInfo *pCinfo,
+ NvU32 ClockSourceCount,
+ NvRmFreqKHz MinFreq,
+ NvRmFreqKHz MaxFreq,
+ const NvRmFreqKHz* PrefFreqList,
+ NvU32 PrefCount,
+ NvRmModuleClockState* pCstate,
+ NvU32 flags);
+
+/**
+ * Configures EMC low-latency fifo for CPU clock source switch.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp15SetEmcForCpuSrcSwitch(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Configures EMC low-latency fifo for CPU clock divider switch.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param CpuFreq Resulting CPU frequency after divider switch
+ * @param Before Specifies if this function is called before (True)
+ * or after (False) divider changes.
+ */
+void
+NvRmPrivAp15SetEmcForCpuDivSwitch(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz CpuFreq,
+ NvBool Before);
+
+/**
+ * Configures maximum core and memory clocks.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp15FastClockConfig(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Gets module frequency synchronized with EMC speed.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module The target module ID.
+ *
+ * @return Module frequency in kHz.
+ */
+NvRmFreqKHz NvRmPrivAp15GetEmcSyncFreq(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module);
+
+/**
+ * Disables PLLs
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the last configured module clock descriptor.
+ * @param pCstate Pointer to the last configured module state structure.
+ */
+void
+NvRmPrivAp15DisablePLLs(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ const NvRmModuleClockState* pCstate);
+
+/**
+ * Turns PLLD (MIPI PLL) power On/Off
+ *
+ * @param hRmDevice The RM device handle.
+ * @param ConfigEntry NV_TRUE if this function is called before display
+ * clock configuration; NV_FALSE otherwise.
+ * @param Pointer to the current state of MIPI PLL power rail, updated
+ * by this function.
+ */
+void
+NvRmPrivAp15PllDPowerControl(
+ NvRmDeviceHandle hRmDevice,
+ NvBool ConfigEntry,
+ NvBool* pMipiPllVddOn);
+
+/**
+ * Clips EMC frequency high limit to one of the fixed DFS EMC configurations,
+ * and if necessary adjust CPU high limit respectively.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCpuHighKHz A pointer to the variable, which contains CPU frequency
+ * high limit in KHz (on entry - requested limit, on exit - clipped limit)
+ * @param pEmcHighKHz A pointer to the variable, which contains EMC frequency
+ * high limit in KHz (on entry - requested limit, on exit - clipped limit)
+ */
+void
+NvRmPrivAp15ClipCpuEmcHighLimits(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pCpuHighKHz,
+ NvRmFreqKHz* pEmcHighKHz);
+
+
+/**
+ * Configures some special bits in the clock source register for given module.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module Target module ID.
+ * @param ClkSourceOffset Clock source register offset.
+ * @param flags Module specific clock configuration flags.
+ */
+void
+NvRmPrivAp15ClockConfigEx(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvU32 ClkSourceOffset,
+ NvU32 flags);
+
+/**
+ * Enables PLL in simulation.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void NvRmPrivAp15SimPllInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Configures oscillator (main) clock doubler.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param OscKHz Oscillator (main) clock frequency in kHz.
+ *
+ * @return NvSuccess if the specified oscillator frequency is supported, and
+ * NvError_NotSupported, otherwise.
+ */
+NvError
+NvRmPrivAp15OscDoublerConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz OscKHz);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_AP15RM_CLOCKS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks_info.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks_info.c
new file mode 100644
index 000000000000..b63198419fd4
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_clocks_info.c
@@ -0,0 +1,1673 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_drf.h"
+#include "ap15rm_clocks.h"
+#include "ap15rm_private.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/project_relocation_table.h"
+
+#define NV_COMMON_CLK_RST_FIELDS_INFO(MODULE, H_L) \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_SRC_DEFAULT_MASK, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_SRC_SHIFT, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_DIVISOR_DEFAULT_MASK, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_DIVISOR_SHIFT, \
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_##H_L##_0, \
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_##H_L##_0_CLK_ENB_##MODULE##_FIELD, \
+ CLK_RST_CONTROLLER_RST_DEVICES_##H_L##_0, \
+ CLK_RST_CONTROLLER_RST_DEVICES_##H_L##_0_SWR_##MODULE##_RST_FIELD
+
+const NvRmModuleClockInfo g_Ap15ModuleClockTable[] =
+{
+ { /* Invalid module */
+ NvRmPrivModuleID_System, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ 0,0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_TRIG_SYS_RST_FIELD,
+ NvRmDiagModuleID_SystemReset
+ },
+ { /* VI controller module - VI clock */
+ NvRmModuleID_Vi, 0 , 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_DIVISOR_SHIFT,
+
+ // Combined VI and VI sensor reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VI_RST_FIELD,
+ NvRmDiagModuleID_Vi
+ },
+ { /* VI controller module - VI sensor clock
+ * Module sub clock must immediately follow main clock
+ */
+ NvRmModuleID_Vi, 0 , 1,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_DIVISOR_SHIFT,
+
+ // Combined VI and VI sensor reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VI_RST_FIELD,
+ NvRmDiagModuleID_ViSensor
+ },
+
+ { /* I2S1 controller module */
+ NvRmModuleID_I2s, 0, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2S1, L),
+ NvRmDiagModuleID_I2s
+ },
+
+ { /* I2S2 controller module */
+ NvRmModuleID_I2s, 1, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2S2, L),
+ NvRmDiagModuleID_I2s
+ },
+
+ { /* I2C1 controller module */
+ NvRmModuleID_I2c, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2C1, L),
+ NvRmDiagModuleID_I2c
+ },
+
+ { /* I2C2 controller module */
+ NvRmModuleID_I2c, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2C2, H),
+ NvRmDiagModuleID_I2c
+ },
+
+ { /* Hsmmc controller module */
+ NvRmModuleID_Hsmmc, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(HSMMC, L),
+ NvRmDiagModuleID_Hsmmc
+ },
+
+ { /* S/PDIF controller module - S/PDIF OUT clock */
+ NvRmModuleID_Spdif, 0, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFOUT_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFOUT_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFOUT_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFOUT_CLK_DIVISOR_SHIFT,
+
+ // Combined SPDIF reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_SPDIF_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD,
+ NvRmDiagModuleID_Spdif
+ },
+ { /* S/PDIF controller module - S/PDIF IN clock
+ * Module sub clock must immediately follow main clock
+ */
+ NvRmModuleID_Spdif, 0, 1,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFIN_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFIN_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFIN_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_0_SPDIFIN_CLK_DIVISOR_SHIFT,
+
+ // Combined SPDIF reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_SPDIF_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD,
+ NvRmDiagModuleID_SpdifIn
+ },
+
+ { /* PWM controller module */
+ NvRmModuleID_Pwm, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(PWM, L),
+ NvRmDiagModuleID_Pwm
+ },
+
+ { /* SPI controller module */
+ NvRmModuleID_Spi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SPI1, H),
+ NvRmDiagModuleID_Spi
+ },
+
+ { /* SBC1 controller module */
+ NvRmModuleID_Slink, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC1, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SBC2 controller module */
+ NvRmModuleID_Slink, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC2, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SBC3 controller module */
+ NvRmModuleID_Slink, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC3, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SLC controller module */
+ NvRmModuleID_Invalid, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SLC1, H),
+ NvRmDiagModuleID_Slc
+ },
+
+ { /* TWC controller module */
+ NvRmModuleID_Twc, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(TWC, L),
+ NvRmDiagModuleID_Twc
+ },
+
+ { /* XIO controller module */
+ NvRmModuleID_Xio, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(XIO, H),
+ NvRmDiagModuleID_Xio
+ },
+
+ { /* IDE controller module */
+ NvRmModuleID_Ide, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(IDE, L),
+ NvRmDiagModuleID_Ide
+ },
+
+ { /* SDIO1 controller module */
+ NvRmModuleID_Sdio, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDIO1, L),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* SDIO2 controller module */
+ NvRmModuleID_Sdio, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDIO2, L),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* NAND Flash controller module */
+ NvRmModuleID_Nand, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(NDFLASH, L),
+ NvRmDiagModuleID_NandFlash
+ },
+
+ { /* MIPI BB controller module */
+ NvRmModuleID_Mipi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(MIPI, H),
+ NvRmDiagModuleID_MipiBaseband
+ },
+
+ { /* DVC controller module */
+ NvRmModuleID_Dvc, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(DVC_I2C, H),
+ NvRmDiagModuleID_Dvc
+ },
+
+ { /* UARTA controller module */
+ NvRmModuleID_Uart, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0_UARTA_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0_UARTA_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTA_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTB controller module */
+ NvRmModuleID_Uart, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0_UARTB_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0_UARTB_CLK_SRC_SHIFT,
+ 0, 0,
+
+ // Combined UARTB and VFIR reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTB_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTB_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTC controller module */
+ NvRmModuleID_Uart, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0_UARTC_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0_UARTC_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_UARTC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_UARTC_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* VFIR controller module */
+ NvRmModuleID_Vfir, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_DIVISOR_SHIFT,
+
+ // Combined UARTB and VFIR reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTB_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTB_RST_FIELD,
+ NvRmDiagModuleID_Vfir
+ },
+
+ { /* Host1x module */
+ NvRmModuleID_GraphicsHost, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(HOST1X, L),
+ NvRmDiagModuleID_Host1x
+ },
+
+ { /* EPP controller module */
+ NvRmModuleID_Epp, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(EPP, L),
+ NvRmDiagModuleID_Epp
+ },
+
+ { /* MPE controller module */
+ NvRmModuleID_Mpe, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(MPE, H),
+ NvRmDiagModuleID_Mpe
+ },
+
+ { /* 2D controller module */
+ NvRmModuleID_2D, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_2D_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_2D_RST_FIELD,
+ NvRmDiagModuleID_2d
+ },
+
+ { /* 3D controller module */
+ NvRmModuleID_3D, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_3D_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_3D_RST_FIELD,
+ NvRmDiagModuleID_3d
+ },
+
+ { /* Display 1 controller module */
+ NvRmModuleID_Display, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0_DISP1_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0_DISP1_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_DISP1_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_DISP1_RST_FIELD,
+ NvRmDiagModuleID_Display
+ },
+
+ { /* Display 2 controller module */
+ NvRmModuleID_Display, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0_DISP2_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0_DISP2_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_DISP2_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_DISP2_RST_FIELD,
+ NvRmDiagModuleID_Display
+ },
+
+ { /* TVO controller module - TVO clock */
+ NvRmModuleID_Tvo, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_DIVISOR_SHIFT,
+
+ // Combined TVO and CVE reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVO_RST_FIELD,
+ NvRmDiagModuleID_Tvo
+ },
+ { /* TVO controller module - CVE clock
+ * Module sub clocks must immediately follow main clock
+ */
+ NvRmModuleID_Tvo, 0, 1,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_DIVISOR_SHIFT,
+
+ // Combined TVO and CVE reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVO_RST_FIELD,
+ NvRmDiagModuleID_Cve
+ },
+ { /* TVO controller module - TVDAC clock
+ * Module sub clocks must immediately follow main clock
+ */
+ NvRmModuleID_Tvo, 0, 2,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVDAC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVDAC_RST_FIELD,
+ NvRmDiagModuleID_Tvdac
+ },
+
+ { /* HDMI controller module */
+ NvRmModuleID_Hdmi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(HDMI, H),
+ NvRmDiagModuleID_Hdmi
+ },
+
+ { /* VDE controller module (VDE and BSEV clocks)
+ * These clocks do not have source selector/divider registers,
+ * and should always be enabled/reset in sync. Therefore, no need
+ * for separate VDE and BSEV subclock descriptors
+ */
+ NvRmModuleID_Vde, 0, 0,
+ {
+ NvRmClockSource_Vbus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ // Combined VDE and BSEV reset and and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ (CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_VDE_FIELD |
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_BSEV_FIELD),
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ (CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_VDE_RST_FIELD |
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_BSEV_RST_FIELD),
+ NvRmDiagModuleID_Vde
+ },
+
+ { /* BSEA controller module */
+ NvRmModuleID_BseA, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_BSEA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_BSEA_RST_FIELD,
+ NvRmDiagModuleID_Bsea
+ },
+
+ { /* VCP controller module */
+ NvRmModuleID_Vcp, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VCP_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VCP_RST_FIELD,
+ NvRmDiagModuleID_Vcp
+ },
+
+ { /* Timer controller module */
+ NvRmModuleID_Timer, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_TMR_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_TMR_RST_FIELD,
+ NvRmDiagModuleID_Timer
+ },
+
+ { /* System Monitor controller module */
+ NvRmModuleID_SysStatMonitor, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_STAT_MON_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_STAT_MON_RST_FIELD,
+ NvRmDiagModuleID_StatMon
+ },
+
+ { /* GPIO controller module */
+ NvRmPrivModuleID_Gpio, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_GPIO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_GPIO_RST_FIELD,
+ NvRmDiagModuleID_Gpio
+ },
+
+ { /* USB controller module */
+ NvRmModuleID_Usb2Otg, 0, 0,
+ {
+ NvRmClockSource_PllU0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_USBD_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_USBD_RST_FIELD,
+ NvRmDiagModuleID_Usb
+ },
+
+ { /* USB controller module */
+ NvRmModuleID_Usb2Otg, 1, 0,
+ {
+ NvRmClockSource_PllU0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_USBD_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_USBD_RST_FIELD,
+ NvRmDiagModuleID_Usb
+ },
+
+ { /* APB DMA controller module */
+ NvRmPrivModuleID_ApbDma, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_APBDMA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_APBDMA_RST_FIELD,
+ NvRmDiagModuleID_ApbDma
+ },
+
+ { /* AC97 controller module */
+ NvRmModuleID_Ac97, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_AC97_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_AC97_RST_FIELD,
+ NvRmDiagModuleID_Ac97
+ },
+
+ { /* Keyboard controller module */
+ NvRmModuleID_Kbc, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_KBC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_KBC_RST_FIELD,
+ NvRmDiagModuleID_Kbc
+ },
+
+ { /* RTC controller module */
+ NvRmModuleID_Rtc, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_RTC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_RTC_RST_FIELD,
+ NvRmDiagModuleID_Rtc
+ },
+
+ { /* Fuse controller module */
+ NvRmModuleID_Fuse, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_FUSE_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_FUSE_RST_FIELD,
+ NvRmDiagModuleID_Fuse
+ },
+
+ { /* Power Management controller module */
+ NvRmModuleID_Pmif, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_PMC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_PMC_RST_FIELD,
+ NvRmDiagModuleID_Pmc
+ },
+
+ { /* CPU cache controller module */
+ NvRmModuleID_CacheMemCtrl, 0, 0,
+ {
+ NvRmClockSource_CpuBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_CACHE1_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_CACHE1_RST_FIELD,
+ NvRmDiagModuleID_Cache
+ },
+ { /* COP (AVP) cache controller module */
+ NvRmModuleID_CacheMemCtrl, 1, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_CACHE2_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_CACHE2_RST_FIELD,
+ NvRmDiagModuleID_Cache
+ },
+
+ { /* DSI controller module */
+ NvRmModuleID_Dsi, 0, 0,
+ {
+ NvRmClockSource_PllD0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_DSI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_DSI_RST_FIELD,
+ NvRmDiagModuleID_Dsi
+ },
+
+ { /* CSI controller module */
+ NvRmModuleID_Csi, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: find a proper clock source
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_CSI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_CSI_RST_FIELD,
+ NvRmDiagModuleID_Csi
+ },
+
+ { /* ISP controller module */
+ NvRmModuleID_Isp, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: find a proper clock source
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_ISP_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_ISP_RST_FIELD,
+ NvRmDiagModuleID_Isp
+ },
+
+ { /* CPU module */
+ NvRmModuleID_Cpu, 0, 0,
+ {
+ NvRmClockSource_CpuBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_CPU_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_CPU_RST_FIELD,
+ NvRmDiagModuleID_Cpu
+ },
+
+ { /* COP (AVP) module */
+ NvRmModuleID_Avp, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: Add COP skipper source?
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ 0, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_COP_RST_FIELD,
+ NvRmDiagModuleID_Coprocessor
+ },
+
+ { /* Memory controller module */
+ NvRmPrivModuleID_MemoryController, 0, 0,
+ {
+ NvRmClockSource_ClkM,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkS,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllP4,
+ NvRmClockSource_PllP3,
+ NvRmClockSource_ClkD
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(MEM, H),
+ NvRmDiagModuleID_Mc
+ },
+
+ { /* External Memory controller module */
+ NvRmPrivModuleID_ExternalMemoryController, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM,
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_SHIFT,
+
+ // EMC has 1x and 2x domains clock enable bits located in the source
+ // register. There is also a global clock enable bit in CLK_OUT_ENB_L_0
+ // register, which is not described here. All 3 bits are set/cleared
+ // in Ap15EnableModuleClock() function below.
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0,
+ (CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_ENB_FIELD |
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_1X_CLK_ENB_FIELD),
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_EMC_RST_FIELD,
+ NvRmDiagModuleID_Emc
+ }
+};
+
+NvU32 const g_Ap15ModuleClockTableSize = NV_ARRAY_SIZE(g_Ap15ModuleClockTable);
+
+/*****************************************************************************/
+/*****************************************************************************/
+// Clock sources
+
+static const NvRmFixedClockInfo s_Ap15FixedClockTable[] =
+{
+ {
+ NvRmClockSource_ClkS,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ClkM,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ClkD,
+ NvRmClockSource_ClkM,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0_CLK_M_DOUBLER_ENB_FIELD
+ },
+
+ {
+ NvRmClockSource_ExtSpdf,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtI2s1,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtI2s2,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtAc97,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtAudio1,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtAudio2,
+ NvRmClockSource_Invalid,
+ 0, 0
+ },
+ {
+ NvRmClockSource_ExtVi,
+ NvRmClockSource_Invalid,
+ 0, 0
+ }
+};
+
+static const NvU32 s_Ap15FixedClockTableSize = NV_ARRAY_SIZE(s_Ap15FixedClockTable);
+
+/*****************************************************************************/
+
+// TODO: Specify PLL ref divider in OSC control reg as PLL C, D, M, P, U source
+
+/*
+ * Notation clarification: in h/w documentation PLL base outputs (except PLLA
+ * output) are denoted as PllX_OUT0, and the secondary PLL outputs (if any)
+ * after fractional dividers are denoted as PllX_OUT1, PllX_OUT2, .... However,
+ * no h/w name is defined for the base PLLA output, and the output of the PLLA
+ * secondary divider is marked as PllA_OUT0 (not PllA_OUT1). Therefore, we use
+ * PllA1 (not PllA0) to denote base PLLA clock.
+ */
+/*
+ * PLL descriptors: clock source id, input/reference source, PLL type,
+ * base and misc control registers, then two numeric limits (presumably
+ * the allowed reference/output frequency range in kHz -- confirm against
+ * the NvRmPllClockInfo declaration).
+ * Note PLLA is cascaded from PllP1 rather than the main oscillator (ClkM).
+ */
+static const NvRmPllClockInfo s_Ap15PllClockTable[] =
+{
+ { /* PLLA base output */
+ NvRmClockSource_PllA1,
+ NvRmClockSource_PllP1,
+ NvRmPllType_LP,
+ CLK_RST_CONTROLLER_PLLA_BASE_0,
+ CLK_RST_CONTROLLER_PLLA_MISC_0,
+ 50000,
+ 1000000
+ },
+
+ { /* PLLC base output */
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM,
+ NvRmPllType_LP,
+ CLK_RST_CONTROLLER_PLLC_BASE_0,
+ CLK_RST_CONTROLLER_PLLC_MISC_0,
+ 100000,
+ 1400000
+ },
+
+ { /* PLLM base output */
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM,
+ NvRmPllType_LP,
+ CLK_RST_CONTROLLER_PLLM_BASE_0,
+ CLK_RST_CONTROLLER_PLLM_MISC_0,
+ 100000,
+ 1000000
+ },
+
+ { /* PLLP base output */
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM,
+ NvRmPllType_LP,
+ CLK_RST_CONTROLLER_PLLP_BASE_0,
+ CLK_RST_CONTROLLER_PLLP_MISC_0,
+ 100000,
+ 1000000
+ },
+
+ { /* PLLD base output */
+ NvRmClockSource_PllD0,
+ NvRmClockSource_ClkM,
+ NvRmPllType_MIPI,
+ CLK_RST_CONTROLLER_PLLD_BASE_0,
+ CLK_RST_CONTROLLER_PLLD_MISC_0,
+ 100000,
+ 1000000
+ },
+
+ { /* PLLU base output */
+ NvRmClockSource_PllU0,
+ NvRmClockSource_ClkM,
+ NvRmPllType_MIPI,
+ CLK_RST_CONTROLLER_PLLU_BASE_0,
+ CLK_RST_CONTROLLER_PLLU_MISC_0,
+ 100000,
+ 1000000
+ }
+};
+
+static const NvU32 s_Ap15PllClockTableSize = NV_ARRAY_SIZE(s_Ap15PllClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Secondary divider descriptors: divider output id, input source, divider
+ * type, then the control register with the ratio field mask/shift, the
+ * enable-control field mask, the register values used to enable and
+ * disable the output, and finally a fixed divider setting
+ * (NVRM_VARIABLE_DIVIDER throughout this table -- presumably meaning the
+ * ratio is software-programmable; confirm against NvRmDividerClockInfo).
+ */
+static const NvRmDividerClockInfo s_Ap15DividerClockTable[] =
+{
+ { /* PLLA0 - PLLA secondary output */
+ NvRmClockSource_PllA0,
+ NvRmClockSource_PllA1,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLA_OUT_0,
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLC1 - PLLC secondary output */
+ NvRmClockSource_PllC1,
+ NvRmClockSource_PllC0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLC_OUT_0,
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLM1 - PLLM secondary output */
+ NvRmClockSource_PllM1,
+ NvRmClockSource_PllM0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLM_OUT_0,
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLP1 - PLLP secondary output (overridden) */
+ NvRmClockSource_PllP1,
+ NvRmClockSource_PllP0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLP_OUTA_0,
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLP2 - PLLP secondary output (overridden) */
+ NvRmClockSource_PllP2,
+ NvRmClockSource_PllP0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLP_OUTA_0,
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLP3 - PLLP secondary output (overridden) */
+ NvRmClockSource_PllP3,
+ NvRmClockSource_PllP0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLP_OUTB_0,
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* PLLP4 - PLLP secondary output (overridden) */
+ NvRmClockSource_PllP4,
+ NvRmClockSource_PllP0,
+ NvRmClockDivider_Fractional_2,
+
+ CLK_RST_CONTROLLER_PLLP_OUTB_0,
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RATIO_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RATIO_SHIFT,
+
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_FIELD |
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_FIELD,
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_ENABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_SHIFT)),
+
+ ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_SHIFT) |
+ (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_RESET_DISABLE <<
+ CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_SHIFT)),
+
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* AHB bus clock divider */
+ NvRmClockSource_Ahb,
+ NvRmClockSource_SystemBus,
+ NvRmClockDivider_Integer_1,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_AHB_RATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_AHB_RATE_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_FIELD,
+ (0x0 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_SHIFT),
+ (0x1 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_SHIFT),
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* APB bus clock divider */
+ NvRmClockSource_Apb,
+ NvRmClockSource_Ahb,
+ NvRmClockDivider_Integer_1,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_APB_RATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_APB_RATE_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_FIELD,
+ (0x0 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_SHIFT),
+ (0x1 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_SHIFT),
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ { /* V-pipe clock divider */
+ NvRmClockSource_Vbus,
+ NvRmClockSource_SystemBus,
+ NvRmClockDivider_Keeper16,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_VCLK_RATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_VCLK_RATE_SHIFT,
+ 0, 0, 0,
+ NVRM_VARIABLE_DIVIDER
+ },
+
+ // TODO: PLL ref divider
+};
+
+static const NvU32 s_Ap15DividerClockTableSize = NV_ARRAY_SIZE(s_Ap15DividerClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Core (super) clock descriptors for the CPU and System buses: clock id,
+ * the eight selectable input sources (the array index presumably matches
+ * the h/w burst-policy source-select encoding -- confirm against
+ * NvRmCoreClockInfo), the burst policy register with its state mask/shift,
+ * per-state source masks and shifts ({unused, IDLE, RUN, IRQ, FIQ}), and
+ * the super clock divider register fields (enable, dividend, divisor).
+ */
+static const NvRmCoreClockInfo s_Ap15CoreClockTable[] =
+{
+ {
+ NvRmClockSource_CpuBus,
+ {
+ NvRmClockSource_ClkM,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkS,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllP4,
+ NvRmClockSource_PllP3,
+ NvRmClockSource_ClkD
+ },
+
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CPU_STATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CPU_STATE_SHIFT,
+ {
+ 0,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IDLE_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_RUN_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IRQ_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_FIQ_SOURCE_DEFAULT_MASK
+
+ },
+ {
+ 0,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IDLE_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_RUN_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IRQ_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_FIQ_SOURCE_SHIFT
+ },
+
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0,
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_ENB_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_ENB_SHIFT,
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_SHIFT,
+ NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_RANGE),
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_SHIFT,
+ NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_RANGE)
+ },
+ {
+ NvRmClockSource_SystemBus,
+ {
+ NvRmClockSource_ClkM,
+ NvRmClockSource_PllC1,
+ NvRmClockSource_PllP4,
+ NvRmClockSource_PllP3,
+ NvRmClockSource_PllP2,
+ NvRmClockSource_ClkD,
+ NvRmClockSource_ClkS,
+ NvRmClockSource_PllM1,
+ },
+
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SYS_STATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SYS_STATE_SHIFT,
+ {
+ 0,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IDLE_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_RUN_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IRQ_SOURCE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_FIQ_SOURCE_DEFAULT_MASK
+
+ },
+ {
+ 0,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IDLE_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_RUN_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IRQ_SOURCE_SHIFT,
+ CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_FIQ_SOURCE_SHIFT
+ },
+
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0,
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_ENB_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_ENB_SHIFT,
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_SHIFT,
+ NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_RANGE),
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_SHIFT,
+ NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_RANGE)
+ }
+};
+
+static const NvU32 s_Ap15CoreClockTableSize = NV_ARRAY_SIZE(s_Ap15CoreClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Selector clock descriptors (audio sync and MPE audio): clock id, the
+ * eight selectable inputs, the source-select register with its mask and
+ * shift, and an optional clock doubler enable register/field (0, 0 when
+ * no doubler exists).
+ */
+static const NvRmSelectorClockInfo s_Ap15SelectorClockTable[] =
+{
+ {
+ NvRmClockSource_AudioSync,
+ {
+ NvRmClockSource_ExtSpdf,
+ NvRmClockSource_ExtI2s1,
+ NvRmClockSource_ExtI2s2,
+ NvRmClockSource_ExtAc97,
+ NvRmClockSource_PllA0,
+ NvRmClockSource_ExtAudio2,
+ NvRmClockSource_ExtAudio1,
+ NvRmClockSource_ExtVi
+ },
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_SYNC_CLK_RATE_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_SYNC_CLK_RATE_SHIFT,
+
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0_SYNC_CLK_DOUBLER_ENB_FIELD
+ },
+ {
+ NvRmClockSource_MpeAudio,
+ {
+ NvRmClockSource_ExtSpdf,
+ NvRmClockSource_ExtI2s1,
+ NvRmClockSource_ExtI2s2,
+ NvRmClockSource_ExtAc97,
+ NvRmClockSource_PllA0,
+ NvRmClockSource_ExtAudio2,
+ NvRmClockSource_ExtAudio1,
+ NvRmClockSource_ExtVi
+ },
+ CLK_RST_CONTROLLER_CLK_SOURCE_MPE_AUDIO_0,
+
+ CLK_RST_CONTROLLER_CLK_SOURCE_MPE_AUDIO_0_MPE_AUDIO_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_MPE_AUDIO_0_MPE_AUDIO_CLK_SRC_SHIFT,
+ 0, 0
+ }
+};
+
+static const NvU32 s_Ap15SelectorClockTableSize = NV_ARRAY_SIZE(s_Ap15SelectorClockTable);
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+static NvRmClockSourceInfo s_Ap15ClockSourceTable[NvRmClockSource_Num] = {{0}};
+
+/**
+ * Builds the unified AP15 clock source table by parsing each of the
+ * per-kind tables above (Fixed, Pll, Divider, Core, Selector) into
+ * s_Ap15ClockSourceTable via NvRmPrivParseClockSources.
+ *
+ * @return Pointer to the first entry of the populated table
+ *         (NvRmClockSource_Num entries long).
+ */
+NvRmClockSourceInfo* NvRmPrivAp15ClockSourceTableInit(void)
+{
+ NvRmClockSourceInfoPtr Src;
+
+#define PARSE_SOURCE_TABLE(type) \
+do\
+{\
+ Src.p##type = (NvRm##type##ClockInfo*)s_Ap15##type##ClockTable;\
+ NvRmPrivParseClockSources( \
+ s_Ap15ClockSourceTable, NvRmClockSource_Num, \
+ Src, s_Ap15##type##ClockTableSize, NvRmClockSourceType_##type); \
+} while(0)
+
+ /* Start from a clean table so re-initialization is idempotent. */
+ NvOsMemset(s_Ap15ClockSourceTable, 0, sizeof(s_Ap15ClockSourceTable));
+
+ PARSE_SOURCE_TABLE(Fixed);
+ PARSE_SOURCE_TABLE(Pll);
+ PARSE_SOURCE_TABLE(Divider);
+ PARSE_SOURCE_TABLE(Core);
+ PARSE_SOURCE_TABLE(Selector);
+
+#undef PARSE_SOURCE_TABLE
+
+ return &s_Ap15ClockSourceTable[0];
+}
+
+/*****************************************************************************/
+
+/*
+ * Per-PLL bookkeeping for DFS: one NvBool per module clock marking which
+ * modules are attached to each PLL, plus a reference table tying each PLL
+ * source to its DFS stop flag, attachment map, and reference counters.
+ * All state is (re)zeroed by NvRmPrivAp15PllReferenceTableInit.
+ */
+static NvBool s_Ap15PllM0Clocks[NV_ARRAY_SIZE(g_Ap15ModuleClockTable)] = {0};
+static NvBool s_Ap15PllC0Clocks[NV_ARRAY_SIZE(g_Ap15ModuleClockTable)] = {0};
+static NvBool s_Ap15PllP0Clocks[NV_ARRAY_SIZE(g_Ap15ModuleClockTable)] = {0};
+static NvBool s_Ap15PllA0Clocks[NV_ARRAY_SIZE(g_Ap15ModuleClockTable)] = {0};
+static NvBool s_Ap15PllD0Clocks[NV_ARRAY_SIZE(g_Ap15ModuleClockTable)] = {0};
+
+static NvRmPllReference s_Ap15PllReferencesTable[] =
+{
+ { NvRmClockSource_PllM0, NvRmDfsStatusFlags_StopPllM0, 0, s_Ap15PllM0Clocks, 0 },
+ { NvRmClockSource_PllC0, NvRmDfsStatusFlags_StopPllC0, 0, s_Ap15PllC0Clocks, 0 },
+ { NvRmClockSource_PllP0, NvRmDfsStatusFlags_StopPllP0, 0, s_Ap15PllP0Clocks, 0 },
+ { NvRmClockSource_PllA0, NvRmDfsStatusFlags_StopPllA0, 0, s_Ap15PllA0Clocks, 0 },
+ { NvRmClockSource_PllD0, NvRmDfsStatusFlags_StopPllD0, 0, s_Ap15PllD0Clocks, 0 },
+};
+static const NvU32 s_Ap15PllReferencesTableSize =
+ NV_ARRAY_SIZE(s_Ap15PllReferencesTable);
+
+/**
+ * Resets the PLL reference table (clears every attachment map and zeroes
+ * the reference counters) and returns it to the caller.
+ *
+ * @param pPllReferencesTable     Receives a pointer to the static table.
+ * @param pPllReferencesTableSize Receives the number of table entries.
+ */
+void
+NvRmPrivAp15PllReferenceTableInit(
+ NvRmPllReference** pPllReferencesTable,
+ NvU32* pPllReferencesTableSize)
+{
+ NvU32 i;
+ for (i = 0; i < s_Ap15PllReferencesTableSize; i++)
+ {
+ NvOsMemset(s_Ap15PllReferencesTable[i].AttachedModules, 0,
+ sizeof(NvBool) * g_Ap15ModuleClockTableSize);
+ s_Ap15PllReferencesTable[i].ReferenceCnt = 0;
+ s_Ap15PllReferencesTable[i].ExternalClockRefCnt = 0;
+ }
+ *pPllReferencesTable = s_Ap15PllReferencesTable;
+ *pPllReferencesTableSize = s_Ap15PllReferencesTableSize;
+}
+
+/*****************************************************************************/
+
+// Power Gating Ids for each Power Group specified in re-location table header
+static const NvU32 s_Ap15PowerGroupIds[] = { NV_POWERGROUP_ENUM_TABLE };
+
+/**
+ * Returns the table of power gating ids (one entry per power group listed
+ * in NV_POWERGROUP_ENUM_TABLE) and its element count.
+ *
+ * @param pPowerGroupIdsTable     Receives a pointer to the static id table.
+ * @param pPowerGroupIdsTableSize Receives the number of entries.
+ */
+void
+NvRmPrivAp15PowerGroupTableInit(
+ const NvU32** pPowerGroupIdsTable,
+ NvU32* pPowerGroupIdsTableSize)
+{
+ *pPowerGroupIdsTable = s_Ap15PowerGroupIds;
+ *pPowerGroupIdsTableSize = NV_ARRAY_SIZE(s_Ap15PowerGroupIds);
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_fuse.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_fuse.c
new file mode 100644
index 000000000000..0e0a8b21adab
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_fuse.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: Fuse API</b>
+ *
+ * @b Description: Contains the NvRM Chip unique id implementation.
+ */
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_hwintf.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/arfuse.h"
+#include "ap15/ap15rm_private.h"
+#include "ap15rm_clocks.h"
+
+/**
+ * Reads the 64-bit chip unique id from the JTAG_SECUREID fuse registers.
+ *
+ * Temporarily sets CFG_ALL_VISIBLE in MISC_CLK_ENB so the protected fuse
+ * registers can be read, then restores the register to its prior value.
+ * When NV_USE_FUSE_CLOCK_ENABLE is set, the fuse module clock is enabled
+ * for the duration of the read.
+ *
+ * @param hDevHandle RM device handle (must be non-NULL).
+ * @param pId        Output buffer; receives the id as a single NvU64.
+ *
+ * @return NvError_Success on completion.
+ */
+NvError NvRmPrivAp15ChipUniqueId(NvRmDeviceHandle hDevHandle,void* pId)
+{
+ NvU32 OldRegData; // Old register contents
+ NvU32 NewRegData; // New register contents
+ NvU64 Temp; // Temp buffer to read the contents of fuses
+ NV_ASSERT(hDevHandle);
+ NV_ASSERT(pId);
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ Ap15EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_TRUE);
+#endif
+
+ // Access to unique id is protected, so make sure all registers visible
+ // first.
+ OldRegData = NV_REGR(hDevHandle,
+ NvRmPrivModuleID_ClockAndReset,
+ 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
+ NewRegData = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER,
+ MISC_CLK_ENB,
+ CFG_ALL_VISIBLE,
+ 1,
+ OldRegData);
+ NV_REGW(hDevHandle,
+ NvRmPrivModuleID_ClockAndReset,
+ 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0,
+ NewRegData);
+
+ // Read the secure id from the fuse registers in to a local buffer.
+ // SECUREID_0 holds the low 32 bits, SECUREID_1 the high 32 bits.
+ Temp = ((NvU64)NV_REGR(hDevHandle,
+ (NvRmPrivModuleID)NvRmModuleID_Fuse,
+ 0,
+ FUSE_JTAG_SECUREID_0_0)) |
+ (((NvU64)NV_REGR(hDevHandle,
+ (NvRmPrivModuleID)NvRmModuleID_Fuse,
+ 0,
+ FUSE_JTAG_SECUREID_1_0)) << 32);
+ // Copy the read data to output buffer
+ NvOsMemcpy(pId,&Temp,sizeof(NvU64));
+
+ // Restore the protected registers enable to the way we found it.
+ NV_REGW(hDevHandle,
+ NvRmPrivModuleID_ClockAndReset,
+ 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0,
+ OldRegData);
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ Ap15EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_FALSE);
+#endif
+
+ return NvError_Success;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c
new file mode 100644
index 000000000000..352016929d4b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_gart.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "ap15/armc.h"
+#include "nvrm_heap.h"
+#include "nvrm_heap_simple.h"
+#include "nvrm_hwintf.h"
+#include "ap15rm_private.h"
+#include "nvassert.h"
+#include "nvcommon.h"
+#include "nvrm_drf.h"
+
+
+/**
+ * Initialize the GART entries, and enable the GART
+ */
+
+#define GART_PAGE_SHIFT (12)
+#define GART_PAGE_SIZE (4096)
+
+extern NvBool gs_GartInited;
+extern NvRmHeapSimple gs_GartAllocator;
+extern NvU32 *gs_GartSave;
+
+/**
+ * Initializes all of the TLB entries in the GART and enables GART translations
+ * All entries are initially marked invalid.
+ *
+ * @param hDevice The RM device handle.
+ */
+static NvError
+NvRmPrivAp15InitGART(NvRmDeviceHandle hDevice);
+static NvError
+NvRmPrivAp15InitGART(NvRmDeviceHandle hDevice)
+{
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+ NvU32 reg;
+ NvU32 data;
+
+ NV_ASSERT(hDevice != NULL);
+
+ // Aperture size determines the number of 4KB page entries.
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ // Shadow buffer used by suspend/resume to save and restore the entries;
+ // intentionally kept allocated for the lifetime of the RM.
+ gs_GartSave = NvOsAlloc( sizeof(NvU32) * GartEntries );
+ if ( NULL == gs_GartSave )
+ return NvError_InsufficientMemory;
+
+ data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
+ for (GartEntry = 0; GartEntry < GartEntries; ++GartEntry)
+ {
+ // set the address
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+ // mark the entry invalid
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
+ }
+
+ // now enable the GART
+ reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_CONFIG_0, reg);
+ return NvSuccess;
+}
+
+/**
+ * Allocates a GART-mapped range: carves NumberOfPages 4KB pages out of the
+ * GART aperture via the simple-heap allocator and programs a valid GART
+ * entry for each backing physical page from hPageHandle.
+ *
+ * @param hDevice       RM device handle.
+ * @param hPageHandle   Source of backing physical pages (page aligned).
+ * @param NumberOfPages Number of 4KB pages to map.
+ * @param PAddr         Receives the GART-space base address.
+ *
+ * @return NvSuccess, or the failing allocator/init error code.
+ */
+NvError
+NvRmPrivAp15HeapGartAlloc(
+ NvRmDeviceHandle hDevice,
+ NvOsPageAllocHandle hPageHandle,
+ NvU32 NumberOfPages,
+ NvRmPhysAddr *PAddr)
+{
+ NvError result = NvSuccess;
+ NvU32 reg;
+ NvU32 i, data;
+ NvU32 FirstGartPage;
+
+ NV_ASSERT(hDevice);
+ NV_ASSERT(hPageHandle);
+
+ result = NvRmPrivHeapSimpleAlloc(
+ &gs_GartAllocator,
+ NumberOfPages*GART_PAGE_SIZE,
+ GART_PAGE_SIZE,
+ PAddr);
+
+ if (result != NvSuccess)
+ return result;
+
+ FirstGartPage = *PAddr;
+
+ /* Check that the GART address exists and is page aligned */
+ NV_ASSERT(FirstGartPage);
+ NV_ASSERT((FirstGartPage & (GART_PAGE_SIZE - 1)) == 0);
+
+ NvOsMutexLock(hDevice->mutex);
+
+ // FIXME: Normally we would do this at init time, but it takes an
+ // egregious amount of csim time, so initialization is deferred until
+ // the first allocation actually needs the GART.
+ if (gs_GartInited == NV_FALSE)
+ {
+ result = NvRmPrivAp15InitGART(hDevice);
+ if ( NvSuccess != result )
+ // NOTE(review): the range just taken from gs_GartAllocator is
+ // not returned on this path - possible leak; confirm intent.
+ goto fail;
+ gs_GartInited = NV_TRUE;
+ }
+
+ for (i = 0; i < NumberOfPages; i++)
+ {
+ data = (NvU32)NvOsPageAddress(hPageHandle, i * GART_PAGE_SIZE);
+
+ /* Check that each physical address is page aligned */
+ NV_ASSERT((data & (GART_PAGE_SIZE - 1)) == 0);
+
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((FirstGartPage + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+ reg =
+ NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 1) |
+ NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR, (data >> GART_PAGE_SHIFT));
+
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, reg);
+ // Read the entry back so the write is known to have landed before
+ // the MC sees traffic to this address (required on csim).
+ (void)NV_REGR(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
+ }
+fail:
+ NvOsMutexUnlock(hDevice->mutex);
+
+ return result;
+}
+
+/**
+ * Invalidates the GART entries backing [addr, addr + NumberOfPages pages)
+ * and returns the range to the simple-heap allocator. A zero addr or page
+ * count is silently ignored.
+ *
+ * NOTE(review): unlike alloc/suspend/resume, this path writes the MC GART
+ * registers without holding hDevice->mutex - confirm callers serialize.
+ */
+void
+NvRmPrivAp15HeapGartFree(
+ NvRmDeviceHandle hDevice,
+ NvRmPhysAddr addr,
+ NvU32 NumberOfPages)
+{
+ NvU32 i;
+ NvU32 reg;
+ NvU32 data;
+
+ NV_ASSERT(hDevice);
+
+ if (addr && NumberOfPages)
+ {
+ // Invalidate GART page table entries
+ data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
+ for (i = 0; i < NumberOfPages; i++)
+ {
+ // set the address
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((addr + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+ // mark the entry invalid
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
+ }
+ NvRmPrivHeapSimpleFree(&gs_GartAllocator, addr);
+ }
+}
+
+
+/**
+ * Saves every GART page table entry into gs_GartSave (allocated by
+ * NvRmPrivAp15InitGART) so the table can be restored after the memory
+ * controller loses state. No-op if the GART was never initialized.
+ */
+void
+NvRmPrivAp15GartSuspend(NvRmDeviceHandle hDevice)
+{
+ NvU32 reg;
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+
+ NvOsMutexLock(hDevice->mutex);
+ if (gs_GartInited == NV_TRUE)
+ {
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
+ {
+ // Select the entry, then read its data word into the shadow copy.
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
+ GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_ADDR_0, reg);
+ gs_GartSave[GartEntry] = NV_REGR(hDevice,
+ NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
+ }
+ }
+ NvOsMutexUnlock(hDevice->mutex);
+}
+
+/**
+ * Restores every GART page table entry from the gs_GartSave shadow copy
+ * captured by NvRmPrivAp15GartSuspend, then re-enables GART translation.
+ * No-op if the GART was never initialized.
+ */
+void
+NvRmPrivAp15GartResume(NvRmDeviceHandle hDevice)
+{
+ NvU32 reg;
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+
+ NvOsMutexLock(hDevice->mutex);
+ if (gs_GartInited == NV_TRUE)
+ {
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
+ {
+ // Select the entry, then write back the saved data word.
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
+ GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_ADDR_0, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_DATA_0, gs_GartSave[GartEntry] );
+ }
+
+ // Translation is disabled across suspend; turn the GART back on.
+ reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_CONFIG_0, reg);
+
+ }
+ NvOsMutexUnlock(hDevice->mutex);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_hwmap.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_hwmap.c
new file mode 100644
index 000000000000..050abdfd7147
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_hwmap.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvrm_chiplib.h"
+
+/**
+ * Maps a physical address range into the caller's address space.
+ * Thin wrapper over NvOsPhysicalMemMap; note the RM signature takes
+ * (flags, memType) in the opposite order to the underlying NvOs call.
+ */
+NvError NvRmPhysicalMemMap(
+ NvRmPhysAddr phys,
+ size_t size,
+ NvU32 flags,
+ NvOsMemAttribute memType,
+ void **ptr )
+{
+ return NvOsPhysicalMemMap(phys, size, memType, flags, ptr);
+}
+
+/** Unmaps a range previously mapped with NvRmPhysicalMemMap. */
+void NvRmPhysicalMemUnmap(void *ptr, size_t size)
+{
+ NvOsPhysicalMemUnmap(ptr, size);
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c
new file mode 100644
index 000000000000..915a9a8f998f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvutil.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_init.h"
+#include "nvrm_rmctrace.h"
+#include "nvrm_configuration.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_heap.h"
+#include "nvrm_pmu_private.h"
+#include "nvrm_processor.h"
+#include "ap15rm_private.h"
+#include "nvrm_structure.h"
+#include "ap15rm_private.h"
+#include "ap15rm_clocks.h"
+#include "nvodm_query.h"
+#include "nvodm_query_pins.h"
+#include "common/nvrm_hwintf.h"
+#include "ap15/armc.h"
+#include "ap15/aremc.h"
+#include "ap15/project_relocation_table.h"
+#include "ap15/arapb_misc.h"
+#include "ap15/arapbpm.h"
+#include "nvrm_pinmux_utils.h"
+#include "ap15/arfuse.h"
+
+static NvRmDevice gs_Rm;
+
+extern NvRmCfgMap g_CfgMap[];
+
+void NvRmPrivMemoryInfo( NvRmDeviceHandle hDevice );
+extern NvError NvRmPrivMapApertures( NvRmDeviceHandle rm );
+extern void NvRmPrivUnmapApertures( NvRmDeviceHandle rm );
+extern NvError NvRmPrivPwmInit(NvRmDeviceHandle hRm);
+extern void NvRmPrivPwmDeInit(NvRmDeviceHandle hRm);
+extern NvU32 NvRmPrivGetBctCustomerOption(NvRmDeviceHandle hRm);
+extern void NvRmPrivReadChipId( NvRmDeviceHandle rm );
+extern NvU32 *NvRmPrivGetRelocationTable( NvRmDeviceHandle hDevice );
+extern NvError NvRmPrivPcieOpen(NvRmDeviceHandle hDeviceHandle);
+extern void NvRmPrivPcieClose(NvRmDeviceHandle hDeviceHandle);
+static void NvRmPrivInitPinAttributes(NvRmDeviceHandle rm);
+static void NvRmPrivBasicReset( NvRmDeviceHandle rm );
+static NvError NvRmPrivMcErrorMonitorStart( NvRmDeviceHandle rm );
+static void NvRmPrivMcErrorMonitorStop( NvRmDeviceHandle rm );
+
+#if !NV_OAL
+/* This function sets some performance timings for Mc & Emc. Numbers are from
+ * the Arch team.
+ */
+/**
+ * Dispatches memory-controller (MC/EMC) tuning to the chip-specific
+ * implementation based on the chip ID read earlier into hRm->ChipId.
+ * 0x15/0x16 are AP15-class parts, 0x20 is AP20; anything else asserts.
+ */
+static void
+NvRmPrivSetupMc(NvRmDeviceHandle hRm)
+{
+    switch (hRm->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        NvRmPrivAp15SetupMc(hRm);
+        break;
+    case 0x20:
+        NvRmPrivAp20SetupMc(hRm);
+        break;
+    default:
+        NV_ASSERT(!"Unsupported chip ID");
+        break;
+    }
+}
+#endif
+
+/**
+ * Legacy entry point for opening the RM device.
+ *
+ * @param pHandle  Receives the RM device handle.
+ * @param DeviceId Ignored -- kept only for source compatibility; all
+ *                 callers are routed to the single static device via
+ *                 NvRmOpenNew.
+ * @return Result of NvRmOpenNew.
+ */
+NvError
+NvRmOpen(NvRmDeviceHandle *pHandle, NvU32 DeviceId ) {
+    return NvRmOpenNew(pHandle);
+}
+
+/**
+ * Minimal pre-initialization of the static RM device: reads the chip ID,
+ * parses the relocation table, and builds the module and interrupt tables.
+ * Idempotent via the bPreInit flag.
+ *
+ * @param pHandle Receives the static RM device handle on first call.
+ */
+void NvRmInit(
+    NvRmDeviceHandle * pHandle )
+{
+    NvU32 *table = 0;
+    NvRmDevice *rm = 0;
+    rm = &gs_Rm;
+
+    /* NOTE(review): this early-return path does not assign *pHandle, so a
+     * second caller gets an unmodified pointer -- confirm callers always
+     * pass a pre-initialized handle or never call twice. */
+    if( rm->bPreInit )
+    {
+        return;
+    }
+
+    /* Read the chip Id and store in the Rm structure. */
+    NvRmPrivReadChipId( rm );
+
+    /* parse the relocation table */
+    table = NvRmPrivGetRelocationTable( rm );
+    NV_ASSERT(table != NULL);
+
+    NV_ASSERT_SUCCESS(NvRmPrivModuleInit( &rm->ModuleTable, table ));
+
+    NvRmPrivMemoryInfo( rm );
+
+    NvRmPrivInterruptTableInit( rm );
+
+    rm->bPreInit = NV_TRUE;
+    *pHandle = rm;
+
+    return;
+}
+
+/**
+ * Full open/initialization of the static RM device. Reference-counted:
+ * the first caller performs the complete bring-up (config, chiplib, RMC
+ * trace, module/interrupt tables, apertures, key list, heaps, power,
+ * clocks, pin-mux, DMA, SPI, DFS, PWM, PMU, MC tuning, MC error monitor);
+ * subsequent callers just bump refcount and receive the same handle.
+ *
+ * @param pHandle Receives the RM device handle on success.
+ * @return NvSuccess, or the first initialization error encountered.
+ *         On failure refcount is reset to 0 but partially-initialized
+ *         state is NOT torn down (see the BUG comment at `fail:`).
+ */
+NvError
+NvRmOpenNew(NvRmDeviceHandle *pHandle)
+{
+    NvError err;
+    NvRmDevice *rm = 0;
+    NvU32 *table = 0;
+
+    NvU32 CarveoutBaseAddr;
+    NvU32 CarveoutSize = 0;
+    NvU32 BctCustomerOption = 0;
+    NvU64 Uid = 0;
+
+    NvOsMutexHandle rmMutex = NULL;
+
+    /* open the nvos trace file */
+    NVOS_TRACE_LOG_START;
+
+    // OAL does not support these mutexes
+    /* Racy lazy creation of the global mutex: the loser of the atomic
+     * compare-exchange destroys its extra mutex. */
+    if (gs_Rm.mutex == NULL)
+    {
+        err = NvOsMutexCreate(&rmMutex);
+        if (err != NvSuccess)
+            return err;
+
+        if (NvOsAtomicCompareExchange32((NvS32*)&gs_Rm.mutex, 0,
+                (NvS32)rmMutex) != 0)
+            NvOsMutexDestroy(rmMutex);
+    }
+
+    NvOsMutexLock(gs_Rm.mutex);
+    rm = &gs_Rm;
+
+    /* Already open: just bump the refcount and hand back the handle. */
+    if(rm->refcount )
+    {
+        rm->refcount++;
+        *pHandle = rm;
+        NvOsMutexUnlock(gs_Rm.mutex);
+        return NvSuccess;
+    }
+
+    /* NOTE(review): the next two statements are a dead self-assignment
+     * (rmMutex = gs_Rm.mutex; gs_Rm.mutex = rmMutex;) -- they have no
+     * effect and look like leftovers from a refactor; candidate for
+     * removal upstream. */
+    rmMutex = gs_Rm.mutex;
+    gs_Rm.mutex = rmMutex;
+
+    // create the memmgr mutex
+    err = NvOsMutexCreate(&rm->MemMgrMutex);
+    if (err)
+        goto fail;
+
+    // create mutex for the clock and reset r-m-w top level registers access
+    err = NvOsMutexCreate(&rm->CarMutex);
+    if (err)
+        goto fail;
+
+    /* NvRmOpen needs to be re-entrant to allow I2C, GPIO and KeyList ODM
+     * services to be available to the ODM query. Therefore, the refcount is
+     * bumped extremely early in initialization, and if any initialization
+     * fails the refcount is reset to 0.
+     */
+    rm->refcount = 1;
+
+    if( !rm->bBasicInit )
+    {
+        /* get the default configuration */
+        err = NvRmPrivGetDefaultCfg( g_CfgMap, &rm->cfg );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        /* get the requested configuration */
+        err = NvRmPrivReadCfgVars( g_CfgMap, &rm->cfg );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+    }
+
+    /* start chiplib (simulation backend) only when a chiplib library name
+     * was configured */
+    if (rm->cfg.Chiplib[0] != '\0')
+    {
+        err = NvRmPrivChiplibStartup( rm->cfg.Chiplib, rm->cfg.ChiplibArgs,
+            NULL );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+    }
+
+    /* open the RMC file */
+    err = NvRmRmcOpen( rm->cfg.RMCTraceFileName, &rm->rmc );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* Skip steps already performed by NvRmInit/NvRmBasicInit. */
+    if( !rm->bPreInit )
+    {
+        /* Read the chip Id and store in the Rm structure. */
+        NvRmPrivReadChipId( rm );
+
+        /* parse the relocation table */
+        table = NvRmPrivGetRelocationTable( rm );
+        if( !table )
+        {
+            goto fail;
+        }
+
+        err = NvRmPrivModuleInit( &rm->ModuleTable, table );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+        NvRmPrivMemoryInfo( rm );
+
+        // Now populate the logical interrupt table.
+        NvRmPrivInterruptTableInit( rm );
+    }
+
+    if( !rm->bBasicInit && !NVOS_IS_WINDOWS_X86 )
+    {
+        err = NvRmPrivMapApertures( rm );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        // Initializing the ODM-defined key list
+        // This gets initialized first, since the RMs calls into
+        // the ODM query may result in the ODM query calling
+        // back into the RM to get this value!
+        BctCustomerOption = NvRmPrivGetBctCustomerOption(rm);
+        err = NvRmPrivInitKeyList(rm, &BctCustomerOption, 1);
+        if (err != NvSuccess)
+        {
+            goto fail;
+        }
+    }
+
+    // prevent re-inits
+    rm->bBasicInit = NV_TRUE;
+    rm->bPreInit = NV_TRUE;
+
+    /* Carveout is taken from the top of SDRAM; base = end of SDRAM minus
+     * the ODM-configured carveout size. */
+    CarveoutSize = NvOdmQueryCarveoutSize();
+    CarveoutBaseAddr = rm->ExtMemoryInfo.base +
+        NvOdmQueryMemSize(NvOdmMemoryType_Sdram) - CarveoutSize;
+
+    NvRmPrivHeapCarveoutInit(CarveoutSize, CarveoutBaseAddr);
+    NvRmPrivHeapIramInit(rm->IramMemoryInfo.size, rm->IramMemoryInfo.base);
+    NvRmPrivPreservedMemHandleInit(rm);
+
+    if (!NVOS_IS_WINDOWS_X86)
+    {
+        // Initialize the GART heap (size & base address)
+        NvRmPrivHeapGartInit( rm );
+
+        NvRmPrivCheckBondOut( rm );
+
+        /* bring modules out of reset */
+        NvRmPrivBasicReset( rm );
+
+        /* initialize power manager before any other module that may access
+         * clock or voltage resources
+         */
+        err = NvRmPrivPowerInit(rm);
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        NvRmPrivInterruptStart( rm );
+
+        // Initializing pins attributes
+        NvRmPrivInitPinAttributes(rm);
+
+        // Initialize RM pin-mux (init's the state of internal shadow
+        // register variables)
+        NvRmInitPinMux(rm, NV_TRUE);
+
+        // Initialize the module clocks.
+        err = NvRmPrivClocksInit( rm );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+    }
+
+    if (!NVOS_IS_WINDOWS_X86)
+    {
+        // FIXME: this crashes in simulation
+        // Enabling only for the non simulation modes.
+        /* Major==0 && Netlist==0 identifies csim (software simulation). */
+        if ((rm->ChipId.Major == 0) && (rm->ChipId.Netlist == 0))
+        {
+            // this is the csim case, so we don't do this here.
+        }
+        else
+        {
+            // Initializing the dma.
+            err = NvRmPrivDmaInit(rm);
+            if( err != NvSuccess )
+            {
+                goto fail;
+            }
+
+            // Initializing the Spi and Slink.
+            err = NvRmPrivSpiSlinkInit(rm);
+            if( err != NvSuccess )
+            {
+                goto fail;
+            }
+
+            // Complete pin mux initialization
+            NvRmInitPinMux(rm, NV_FALSE);
+
+            // Initializing the dfs
+            err = NvRmPrivDfsInit(rm);
+            if( err != NvSuccess )
+            {
+                goto fail;
+            }
+        }
+
+        // Initializing the Pwm
+        err = NvRmPrivPwmInit(rm);
+        if (err != NvSuccess)
+        {
+            goto fail;
+        }
+
+        // PMU interface init utilizes ODM services that reenter NvRmOpen().
+        // Therefore, it shall be performed after refcount is set so that
+        // reentry has no side-effects except bumping refcount. The latter
+        // is reset below so that RM can be eventually closed.
+        err = NvRmPrivPmuInit(rm);
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        // set the mc & emc tuning parameters
+        NvRmPrivSetupMc(rm);
+        if (!NvRmIsSimulation())
+        {
+            // Configure PLL rails, boost core power and clocks
+            // Initialize and start temperature monitoring
+            NvRmPrivPllRailsInit(rm);
+            NvRmPrivBoostClocks(rm);
+            NvRmPrivDttInit(rm);
+        }
+
+        if (0) /* FIXME Don't enable PCI yet */
+        {
+            err = NvRmPrivPcieOpen( rm );
+            if (err != NvSuccess && err != NvError_ModuleNotPresent)
+            {
+                goto fail;
+            }
+        }
+        // Asynchronous interrupts must be disabled until the very end of
+        // RmOpen. They can be enabled just before releasing rm mutex after
+        // completion of all initialization calls.
+        NvRmPrivPmuInterruptEnable(rm);
+
+        // Start Memory Controller Error monitoring.
+        err = NvRmPrivMcErrorMonitorStart(rm);
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        // WAR for bug 600821
+        /* NOTE(review): pokes PMIF register 0xD0 directly on AP20 A02
+         * silicon with a specific UID prefix -- semantics of the magic
+         * values are not visible here; verify against bug 600821. */
+        if ((rm->ChipId.Id == 0x20) &&
+            (rm->ChipId.Major == 0x1) && (rm->ChipId.Minor == 0x2))
+        {
+            err = NvRmQueryChipUniqueId(rm, sizeof (NvU64), &Uid);
+            if ((Uid>>32) == 0x08080105)
+            {
+                NV_REGW(rm, NvRmModuleID_Pmif, 0, 0xD0, 0xFFFFFFEF);
+            }
+        }
+    }
+
+    /* assign the handle pointer */
+    *pHandle = rm;
+
+    NvOsMutexUnlock(gs_Rm.mutex);
+    return NvSuccess;
+
+fail:
+    // FIXME: free rm if it becomes dynamically allocated
+    // BUG: there are about ten places that we go to fail, and we make no
+    // effort here to clean anything up.
+    /* NOTE(review): refcount is cleared AFTER the mutex is released, so a
+     * concurrent opener could observe refcount==1 on a failed device --
+     * consider resetting refcount before unlocking. */
+    NvOsMutexUnlock(gs_Rm.mutex);
+    NV_DEBUG_PRINTF(("RM init failed\n"));
+    rm->refcount = 0;
+    return err;
+}
+
+/**
+ * Drops one reference on the RM device; when the refcount reaches zero,
+ * tears down all subsystems in roughly reverse order of NvRmOpenNew
+ * (PMU/DTT, DMA/SPI/DFS, clocks, power, key list, PWM, MC monitor,
+ * interrupts, apertures, heaps, module table, mutexes, trace log).
+ *
+ * @param handle RM device handle; NULL is tolerated as a no-op.
+ */
+void
+NvRmClose(NvRmDeviceHandle handle)
+{
+    if( !handle )
+    {
+        return;
+    }
+
+    NV_ASSERT( handle->mutex );
+
+    /* decrement refcount */
+    NvOsMutexLock( handle->mutex );
+    handle->refcount--;
+
+    /* do deinit if refcount is zero */
+    if( handle->refcount == 0 )
+    {
+        if (!NVOS_IS_WINDOWS_X86)
+        {
+            // PMU and DTT deinit through ODM services reenters NvRmClose().
+            // The refcount will wrap around and this will be the only reentry
+            // side-effect, which is compensated after deint exit.
+            NvRmPrivDttDeinit();
+            handle->refcount = 0;
+            NvRmPrivPmuDeinit(handle);
+            handle->refcount = 0;
+
+            if (0) /* FIXME Don't enable PCIE yet */
+            {
+                NvRmPrivPcieClose( handle );
+            }
+        }
+
+        if (!NVOS_IS_WINDOWS_X86)
+        {
+            /* disable modules */
+            // Enabling only for the non simulation modes.
+            /* Major==0 && Netlist==0 identifies csim, matching the guard
+             * used during init in NvRmOpenNew. */
+            if ((handle->ChipId.Major == 0) && (handle->ChipId.Netlist == 0))
+            {
+                // this is the csim case, so we don't do this here.
+            }
+            else
+            {
+                NvRmPrivDmaDeInit();
+
+                NvRmPrivSpiSlinkDeInit();
+
+                NvRmPrivDfsDeinit(handle);
+            }
+
+            /* deinit clock manager */
+            NvRmPrivClocksDeinit(handle);
+
+            /* deinit power manager */
+            NvRmPrivPowerDeinit(handle);
+
+            NvRmPrivDeInitKeyList(handle);
+            NvRmPrivPwmDeInit(handle);
+            // Stop Memory controller error monitoring.
+            NvRmPrivMcErrorMonitorStop(handle);
+
+            /* if anyone left an interrupt registered, this will clear it. */
+            NvRmPrivInterruptShutdown(handle);
+
+            /* unmap the apertures */
+            NvRmPrivUnmapApertures( handle );
+
+            if (NvRmIsSimulation())
+                NvRmPrivChiplibShutdown();
+
+        }
+
+        NvRmPrivHeapCarveoutDeinit();
+        NvRmPrivHeapIramDeinit();
+
+        if (!NVOS_IS_WINDOWS_X86)
+        {
+            // De-Initialize the GART heap
+            NvRmPrivHeapGartDeinit();
+        }
+
+        NvRmRmcClose( &handle->rmc );
+
+        /* deallocate the instance table */
+        NvRmPrivModuleDeinit( &handle->ModuleTable );
+
+        /* free up the CAR mutex */
+        NvOsMutexDestroy(handle->CarMutex);
+
+        /* free up the memmgr mutex */
+        NvOsMutexDestroy(handle->MemMgrMutex);
+
+        /* close the nvos trace file */
+        NVOS_TRACE_LOG_END;
+    }
+    NvOsMutexUnlock( handle->mutex );
+
+#if NVOS_IS_WINDOWS && !NVOS_IS_WINDOWS_CE
+    /* NOTE(review): refcount is re-read and the global mutex destroyed
+     * AFTER the unlock above -- a concurrent NvRmOpenNew could be inside
+     * the critical section when the mutex is destroyed. Confirm single-
+     * threaded teardown is guaranteed on this platform. */
+    if( handle->refcount == 0 )
+    {
+        NvOsMutexDestroy(handle->mutex);
+        gs_Rm.mutex = 0;
+    }
+#endif
+}
+
+/**
+ * Populates the device's external-memory, IRAM, and GART base/size fields
+ * from the parsed module relocation table. Must run after
+ * NvRmPrivModuleInit.
+ *
+ * @param hDevice RM device whose Ext/Iram/Gart MemoryInfo fields are set.
+ */
+void
+NvRmPrivMemoryInfo( NvRmDeviceHandle hDevice )
+{
+    NvRmModuleTable *tbl;
+    NvRmModuleInstance *inst;
+
+    tbl = &hDevice->ModuleTable;
+
+    /* Get External memory module info */
+    inst = tbl->ModInst +
+        (tbl->Modules)[NvRmPrivModuleID_ExternalMemory].Index;
+
+    hDevice->ExtMemoryInfo.base = inst->PhysAddr;
+    hDevice->ExtMemoryInfo.size = inst->Length;
+
+    /* Get Iram Memory Module Info .Special handling since iram has 4 banks
+     * and each has a different instance in the relocation table
+     */
+
+    inst = tbl->ModInst + (tbl->Modules)[NvRmPrivModuleID_Iram].Index;
+    hDevice->IramMemoryInfo.base = inst->PhysAddr;
+    hDevice->IramMemoryInfo.size = inst->Length;
+
+    inst++;
+    // Below loop works assuming that relocation table parsing compacted
+    // scattered multiple instances into sequential list
+    while(NvRmPrivDevToModuleID(inst->DeviceId) == NvRmPrivModuleID_Iram)
+    {
+        // The IRAM banks are contiguous addresses of memory. Cannot handle
+        // non-contiguous memory for now
+        NV_ASSERT(hDevice->IramMemoryInfo.base +
+            hDevice->IramMemoryInfo.size == inst->PhysAddr);
+
+        hDevice->IramMemoryInfo.size += inst->Length;
+        inst++;
+    }
+
+    if (!(NVCPU_IS_X86 && NVOS_IS_WINDOWS))
+    {
+        /* Get GART memory module info */
+        inst = tbl->ModInst +
+            (tbl->Modules)[NvRmPrivModuleID_Gart].Index;
+        hDevice->GartMemoryInfo.base = inst->PhysAddr;
+        hDevice->GartMemoryInfo.size = inst->Length;
+    }
+}
+
+/**
+ * Returns a pointer to the device's RMC trace-file state.
+ *
+ * @param hDevice Open RM device handle (asserted non-NULL).
+ * @param file    Receives a pointer to the embedded RMC file object.
+ * @return Always NvSuccess.
+ */
+NvError
+NvRmGetRmcFile( NvRmDeviceHandle hDevice, NvRmRmcFile **file )
+{
+    NV_ASSERT(hDevice);
+
+    *file = &hDevice->rmc;
+    return NvSuccess;
+}
+
+/**
+ * Returns the singleton RM device instance, regardless of whether it has
+ * been initialized yet.
+ */
+NvRmDeviceHandle NvRmPrivGetRmDeviceHandle()
+{
+    return &gs_Rm;
+}
+
+/**
+ * Initializes pins attributes
+ * @param hRm The RM device handle
+ */
+static void
+NvRmPrivInitPinAttributes(NvRmDeviceHandle rm)
+{
+    NvU32 Count = 0, Offset = 0, Value = 0;
+    NvU32 Major = 0;
+    NvU32 Minor = 0;
+    NvOdmPinAttrib *pPinAttribTable = NULL;
+    NvRmModuleCapability caps[4];
+    NvRmModuleCapability *pCap = NULL;
+
+    NV_ASSERT( rm );
+
+    NvOsMemset(caps, 0, sizeof(caps));
+
+    /* Capability table for the misc module: each entry maps a
+     * (Major, Minor) version to the capability record to use. */
+    caps[0].MajorVersion = 1;
+    caps[0].MinorVersion = 0;
+    caps[0].EcoLevel = 0;
+    caps[0].Capability = &caps[0];
+
+    caps[1].MajorVersion = 1;
+    caps[1].MinorVersion = 1;
+    caps[1].EcoLevel = 0;
+
+    caps[2].MajorVersion = 1;
+    caps[2].MinorVersion = 2;
+    caps[2].EcoLevel = 0;
+
+    // the pin attributes for v 1.0 and v1.1 of the misc module
+    // are fully compatible, so the version comparison is made against 1.0
+    // Treating 1.2 same as 1.0/1.1.
+    caps[1].Capability = &caps[0];
+    caps[2].Capability = &caps[0];
+
+    /* AP20 misc module pin attributes, set differently than AP15 as the pin
+     * attribute registers in misc module changed */
+    caps[3].MajorVersion = 2;
+    caps[3].MinorVersion = 0;
+    caps[3].EcoLevel = 0;
+    caps[3].Capability = &caps[3];
+
+    NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(
+        rm,
+        NvRmModuleID_Misc,
+        caps,
+        sizeof(caps)/sizeof(caps[0]),
+        (void**)&pCap));
+
+    Count = NvOdmQueryPinAttributes((const NvOdmPinAttrib **)&pPinAttribTable);
+
+    /* Each table entry encodes the target misc-module version in the top
+     * byte of ConfigRegister (bits 31:28 = major, 27:24 = minor) and the
+     * register offset in the low 16 bits; only entries matching the
+     * detected module version are written. */
+    for ( ; Count ; Count--, pPinAttribTable++)
+    {
+        Major = (pPinAttribTable->ConfigRegister >> 28);
+        Minor = (pPinAttribTable->ConfigRegister >> 24) & 0xF;
+        if ((Major == pCap->MajorVersion) && (Minor == pCap->MinorVersion))
+        {
+            Offset = pPinAttribTable->ConfigRegister & 0xFFFF;
+            Value = pPinAttribTable->Value;
+            NV_REGW(rm, NvRmModuleID_Misc, 0, Offset, Value);
+        }
+    }
+}
+
+
+/**
+ * Brings modules out of reset via the chip-specific reset routine,
+ * dispatched on the chip ID (0x15/0x16 = AP15-class, 0x20 = AP20).
+ */
+static void NvRmPrivBasicReset( NvRmDeviceHandle rm )
+{
+    switch (rm->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        NvRmPrivAp15BasicReset(rm);
+        return;
+    case 0x20:
+        NvRmPrivAp20BasicReset(rm);
+        return;
+    default:
+        NV_ASSERT(!"Unsupported chip ID");
+        return;
+    }
+}
+
+/**
+ * Starts memory-controller error monitoring for the detected chip.
+ *
+ * @return Result of the chip-specific start routine, or
+ *         NvError_NotSupported for unrecognized chip IDs.
+ */
+NvError NvRmPrivMcErrorMonitorStart( NvRmDeviceHandle rm )
+{
+    NvError e = NvError_NotSupported;
+
+    switch (rm->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        e = NvRmPrivAp15McErrorMonitorStart(rm);
+        break;
+    case 0x20:
+        e = NvRmPrivAp20McErrorMonitorStart(rm);
+        break;
+    default:
+        NV_ASSERT(!"Unsupported chip ID");
+        break;
+    }
+    return e;
+}
+
+/**
+ * Stops memory-controller error monitoring; counterpart to
+ * NvRmPrivMcErrorMonitorStart, dispatched on the same chip IDs.
+ */
+void NvRmPrivMcErrorMonitorStop( NvRmDeviceHandle rm )
+{
+    switch (rm->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        NvRmPrivAp15McErrorMonitorStop(rm);
+        break;
+    case 0x20:
+        NvRmPrivAp20McErrorMonitorStop(rm);
+        break;
+    default:
+        NV_ASSERT(!"Unsupported chip ID");
+        break;
+    }
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c
new file mode 100644
index 000000000000..1b7f348b8e74
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_init_common.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvutil.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_init.h"
+#include "nvrm_rmctrace.h"
+#include "nvrm_configuration.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_heap.h"
+#include "nvrm_pmu_private.h"
+#include "nvrm_processor.h"
+#include "nvrm_structure.h"
+#include "nvrm_xpc.h"
+#include "ap15rm_private.h"
+#include "ap15rm_private.h"
+#include "ap15rm_clocks.h"
+#include "nvodm_query.h"
+#include "nvodm_query_pins.h"
+#include "common/nvrm_hwintf.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvrm_minikernel.h"
+#include "ap15/arapb_misc.h" // chipid, has to be the same for all chips
+#include "ap15/arapbpm.h"
+#include "ap15/arfuse.h"
+
+extern NvRmCfgMap g_CfgMap[];
+
+void NvRmPrivMemoryInfo( NvRmDeviceHandle hDevice );
+void NvRmPrivReadChipId( NvRmDeviceHandle rm );
+void NvRmPrivGetSku( NvRmDeviceHandle rm );
+/** Returns the pointer to the relocation table */
+NvU32 *NvRmPrivGetRelocationTable( NvRmDeviceHandle hDevice );
+NvError NvRmPrivMapApertures( NvRmDeviceHandle rm );
+void NvRmPrivUnmapApertures( NvRmDeviceHandle rm );
+NvU32 NvRmPrivGetBctCustomerOption(NvRmDeviceHandle hRm);
+
+/* Configuration-variable map consumed by NvRmPrivGetDefaultCfg /
+ * NvRmPrivReadCfgVars: each entry binds an environment variable name to a
+ * type, a default value, and the destination offset inside
+ * RmConfigurationVariables. Terminated by a zero entry. */
+NvRmCfgMap g_CfgMap[] =
+{
+    { "NV_CFG_RMC_FILE", NvRmCfgType_String, (void *)"",
+        STRUCT_OFFSET(RmConfigurationVariables, RMCTraceFileName) },
+
+    /* don't need chiplib for non-sim builds */
+    { "NV_CFG_CHIPLIB", NvRmCfgType_String, (void *)"",
+        STRUCT_OFFSET(RmConfigurationVariables, Chiplib) },
+
+    { "NV_CFG_CHIPLIB_ARGS", NvRmCfgType_String, (void *)"",
+        STRUCT_OFFSET(RmConfigurationVariables, ChiplibArgs) },
+
+    { 0 }
+};
+
+/**
+ * Returns a pointer to the device's module table (relocation-table-derived
+ * module/instance bookkeeping).
+ */
+NvRmModuleTable *
+NvRmPrivGetModuleTable(
+    NvRmDeviceHandle hDevice )
+{
+    return &hDevice->ModuleTable;
+}
+
+/**
+ * Returns the chip-specific relocation table for the detected chip ID,
+ * or NULL (after asserting) for unknown chips. Requires
+ * NvRmPrivReadChipId to have run first.
+ */
+NvU32 *
+NvRmPrivGetRelocationTable( NvRmDeviceHandle hDevice )
+{
+    switch( hDevice->ChipId.Id ) {
+    case 0x15:
+        return NvRmPrivAp15GetRelocationTable( hDevice );
+    case 0x16:
+        return NvRmPrivAp16GetRelocationTable( hDevice );
+    case 0x20:
+        return NvRmPrivAp20GetRelocationTable( hDevice );
+    default:
+        NV_ASSERT(!"Invalid Chip" );
+        return 0;
+    }
+}
+
+/**
+ * Reads the chip identification registers (HIDREV, EMU_REVID) from the
+ * APB misc aperture and fills rm->ChipId (id, major/minor revision,
+ * family, netlist, patch; SKU for real silicon via NvRmPrivGetSku).
+ * On x86/Windows simulation builds a fixed AP15 identity is used instead.
+ * On mapping failure the function returns silently, leaving ChipId
+ * unmodified.
+ */
+void
+NvRmPrivReadChipId( NvRmDeviceHandle rm )
+{
+#if (NVCPU_IS_X86 && NVOS_IS_WINDOWS)
+    NvRmChipId *id;
+    NV_ASSERT( rm );
+
+    id = &rm->ChipId;
+
+    /* Simulation identity: hard-coded AP15, rev 0.0. */
+    id->Family = NvRmChipFamily_HandheldSoc;
+    id->Id = 0x15;
+    id->Major = 0x0;
+    id->Minor = 0x0;
+    id->SKU = 0x0;
+    id->Netlist = 0x0;
+    id->Patch = 0x0;
+#else
+    NvU32 reg;
+    NvRmChipId *id;
+    NvU32 fam;
+    char *s;
+    NvU8 *VirtAddr;
+    NvError e;
+
+    NV_ASSERT( rm );
+    id = &rm->ChipId;
+
+    /* Hard coding the address of the chip ID address space, as we haven't yet
+     * parsed the relocation table.
+     */
+    e = NvRmPhysicalMemMap(0x70000000, 0x1000, NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached, (void **)&VirtAddr);
+    if (e != NvSuccess)
+    {
+        NV_DEBUG_PRINTF(("APB misc aperture map failure\n"));
+        return;
+    }
+
+    /* chip id is in the misc aperture */
+    reg = NV_READ32( VirtAddr + APB_MISC_GP_HIDREV_0 );
+    id->Id = (NvU16)NV_DRF_VAL( APB_MISC_GP, HIDREV, CHIPID, reg );
+    id->Major = (NvU8)NV_DRF_VAL( APB_MISC_GP, HIDREV, MAJORREV, reg );
+    id->Minor = (NvU8)NV_DRF_VAL( APB_MISC_GP, HIDREV, MINORREV, reg );
+
+    /* Map the hardware family field to the RM enum; `s` holds the
+     * printable name used in the debug output below. */
+    fam = NV_DRF_VAL( APB_MISC_GP, HIDREV, HIDFAM, reg );
+    switch( fam ) {
+    case APB_MISC_GP_HIDREV_0_HIDFAM_GPU:
+        id->Family = NvRmChipFamily_Gpu;
+        s = "GPU";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_HANDHELD:
+        id->Family = NvRmChipFamily_Handheld;
+        s = "Handheld";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_BR_CHIPS:
+        id->Family = NvRmChipFamily_BrChips;
+        s = "BrChips";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_CRUSH:
+        id->Family = NvRmChipFamily_Crush;
+        s = "Crush";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_MCP:
+        id->Family = NvRmChipFamily_Mcp;
+        s = "MCP";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_CK:
+        id->Family = NvRmChipFamily_Ck;
+        s = "Ck";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_VAIO:
+        id->Family = NvRmChipFamily_Vaio;
+        s = "Vaio";
+        break;
+    case APB_MISC_GP_HIDREV_0_HIDFAM_HANDHELD_SOC:
+        id->Family = NvRmChipFamily_HandheldSoc;
+        s = "Handheld SOC";
+        break;
+    default:
+        NV_ASSERT( !"bad chip family" );
+        NvRmPhysicalMemUnmap(VirtAddr, 0x1000);
+        return;
+    }
+
+    reg = NV_READ32( VirtAddr + APB_MISC_GP_EMU_REVID_0 );
+    id->Netlist = (NvU16)NV_DRF_VAL( APB_MISC_GP, EMU_REVID, NETLIST, reg );
+    id->Patch = (NvU16)NV_DRF_VAL( APB_MISC_GP, EMU_REVID, PATCH, reg );
+
+    /* Major == 0 means not real silicon: netlist 0 is software simulation,
+     * otherwise emulation (minor 0 = QuickTurn, else FPGA). */
+    if( id->Major == 0 )
+    {
+        char *emu;
+        if( id->Netlist == 0 )
+        {
+            NvOsDebugPrintf( "Simulation Chip: 0x%x\n", id->Id );
+        }
+        else
+        {
+            if( id->Minor == 0 )
+            {
+                emu = "QuickTurn";
+            }
+            else
+            {
+                emu = "FPGA";
+            }
+
+            NvOsDebugPrintf( "Emulation (%s) Chip: 0x%x Netlist: 0x%x "
+                "Patch: 0x%x\n", emu, id->Id, id->Netlist, id->Patch );
+        }
+    }
+    else
+    {
+        // on real silicon
+
+        NvRmPrivGetSku( rm );
+
+        NvOsDebugPrintf( "Chip Id: 0x%x (%s) Major: 0x%x Minor: 0x%x "
+            "SKU: 0x%x\n", id->Id, s, id->Major, id->Minor, id->SKU );
+    }
+
+    // add a sanity check here, so that if we think we are on sim, but don't
+    // detect a sim/quickturn netlist bail out with an error
+    if ( NvRmIsSimulation() && id->Major != 0 )
+    {
+        // this should all get optimized away in release builds because the
+        // above will get evaluated to if ( 0 )
+        NV_ASSERT(!"invalid major version number for simulation");
+    }
+    NvRmPhysicalMemUnmap(VirtAddr, 0x1000);
+#endif
+}
+
+/**
+ * Reads the chip SKU from the fuse aperture into rm->ChipId.SKU.
+ * Optionally (NV_USE_FUSE_CLOCK_ENABLE) gates the fuse clock on around
+ * the read via the clock-and-reset controller. Called only for real
+ * silicon (Major != 0) from NvRmPrivReadChipId.
+ */
+void
+NvRmPrivGetSku( NvRmDeviceHandle rm )
+{
+    NvError e;
+    NvRmChipId *id;
+    NvU8 *FuseVirt;
+    NvU32 reg;
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+    NvU8 *CarVirt = 0;
+#endif
+
+    NV_ASSERT( rm );
+    id = &rm->ChipId;
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+    // Enable fuse clock
+    /* 0x60006000 is the clock-and-reset controller aperture; bit 0x80 of
+     * CLK_OUT_ENB_H gates the fuse block clock. */
+    e = NvRmPhysicalMemMap(0x60006000, 0x1000, NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached, (void **)&CarVirt);
+    if (e == NvSuccess)
+    {
+        reg = NV_READ32(CarVirt + CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0);
+        reg |= 0x80;
+        NV_WRITE32(CarVirt + CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0, reg);
+    }
+#endif
+
+    /* Read the fuse only on real silicon, as it was not guaranteed to be
+     * present on the emulation/simulation platforms.
+     */
+    e = NvRmPhysicalMemMap(0x7000f800, 0x400, NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached, (void **)&FuseVirt);
+    if (e == NvSuccess)
+    {
+        // Read the SKU from the fuse module.
+        reg = NV_READ32( FuseVirt + FUSE_SKU_INFO_0 );
+        id->SKU = (NvU16)reg;
+        NvRmPhysicalMemUnmap(FuseVirt, 0x400);
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+        // Disable fuse clock
+        if (CarVirt)
+        {
+            reg = NV_READ32(CarVirt + CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0);
+            reg &= ~0x80;
+            NV_WRITE32(CarVirt + CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0, reg);
+            NvRmPhysicalMemUnmap(CarVirt, 0x1000);
+        }
+#endif
+    } else
+    {
+        NV_ASSERT(!"Cannot map the FUSE aperture to get the SKU");
+        id->SKU = 0;
+    }
+}
+
+/**
+ * Maps the register apertures of a whitelisted set of modules into the
+ * kernel's address space, writing each instance's VirtAddr. Modules not
+ * on the explicit allow-list below are skipped to avoid mapping
+ * apertures the RM never touches.
+ *
+ * @param rm RM device with an initialized module table.
+ * @return NvSuccess, or the first mapping failure (already-created
+ *         mappings are NOT unwound on error -- NvRmPrivUnmapApertures
+ *         handles cleanup at close time).
+ */
+NvError
+NvRmPrivMapApertures( NvRmDeviceHandle rm )
+{
+    NvRmModuleTable *tbl;
+    NvRmModuleInstance *inst;
+    NvRmModule *mod;
+    NvU32 devid;
+    NvU32 i;
+    NvError e;
+
+    NV_ASSERT( rm );
+
+    /* loop over the instance list and map everything */
+    tbl = &rm->ModuleTable;
+    mod = tbl->Modules;
+    for( i = 0; i < NvRmPrivModuleID_Num; i++ )
+    {
+        if( mod[i].Index == NVRM_MODULE_INVALID )
+        {
+            continue;
+        }
+
+        /* Skip every module that is not on the allow-list. */
+        if ((i != NvRmPrivModuleID_Ahb_Arb_Ctrl ) &&
+            (i != NvRmPrivModuleID_ApbDma ) &&
+            (i != NvRmPrivModuleID_ApbDmaChannel ) &&
+            (i != NvRmPrivModuleID_ClockAndReset ) &&
+            (i != NvRmPrivModuleID_ExternalMemoryController ) &&
+            (i != NvRmPrivModuleID_Gpio ) &&
+            (i != NvRmPrivModuleID_Interrupt ) &&
+            (i != NvRmPrivModuleID_InterruptArbGnt ) &&
+            (i != NvRmPrivModuleID_InterruptDrq ) &&
+            (i != NvRmPrivModuleID_MemoryController ) &&
+            (i != NvRmModuleID_Misc) &&
+            (i != NvRmPrivModuleID_ArmPerif) &&
+            (i != NvRmModuleID_3D) &&
+            (i != NvRmModuleID_CacheMemCtrl ) &&
+            (i != NvRmModuleID_Display) &&
+            (i != NvRmModuleID_Dvc) &&
+            (i != NvRmModuleID_FlowCtrl ) &&
+            (i != NvRmModuleID_Fuse ) &&
+            (i != NvRmModuleID_GraphicsHost ) &&
+            (i != NvRmModuleID_I2c) &&
+            (i != NvRmModuleID_Isp) &&
+            (i != NvRmModuleID_Mpe) &&
+            (i != NvRmModuleID_Pmif ) &&
+            (i != NvRmModuleID_Mipi ) &&
+            (i != NvRmModuleID_ResourceSema ) &&
+            (i != NvRmModuleID_SysStatMonitor ) &&
+            (i != NvRmModuleID_TimerUs ) &&
+            (i != NvRmModuleID_Vde ) &&
+            (i != NvRmModuleID_ExceptionVector ) &&
+            (i != NvRmModuleID_Usb2Otg ) &&
+            (i != NvRmModuleID_Vi)
+           )
+        {
+            continue;
+        }
+
+        /* FIXME If the multiple instances of the same module is adjacent to
+         * each other then we can do one allocation for all those modules.
+         */
+
+        /* map all of the device instances; instances of the same device
+         * are stored consecutively, so walk until the DeviceId changes */
+        inst = tbl->ModInst + mod[i].Index;
+        devid = inst->DeviceId;
+        while( devid == inst->DeviceId )
+        {
+            /* If this is a device that actually has an aperture... */
+            if (inst->PhysAddr)
+            {
+                e = NvRmPhysicalMemMap(
+                    inst->PhysAddr, inst->Length, NVOS_MEM_READ_WRITE,
+                    NvOsMemAttribute_Uncached, &inst->VirtAddr);
+                if (e != NvSuccess)
+                {
+                    NV_DEBUG_PRINTF(("Device %d at physical addr 0x%X has no "
+                        "virtual mapping\n", devid, inst->PhysAddr));
+                    return e;
+                }
+            }
+
+            inst++;
+        }
+    }
+
+    return NvSuccess;
+}
+
+/**
+ * Unmaps every module aperture instance in the module table; counterpart
+ * to NvRmPrivMapApertures. NOTE(review): this iterates ALL modules while
+ * the map routine only mapped an allow-listed subset -- unmapping relies
+ * on NvRmPhysicalMemUnmap tolerating never-mapped (NULL) VirtAddr values;
+ * confirm that holds on all platforms.
+ */
+void
+NvRmPrivUnmapApertures( NvRmDeviceHandle rm )
+{
+    NvRmModuleTable *tbl;
+    NvRmModuleInstance *inst;
+    NvRmModule *mod;
+    NvU32 devid;
+    NvU32 i;
+
+    NV_ASSERT( rm );
+
+    /* loop over the instance list and unmap everything */
+    tbl = &rm->ModuleTable;
+    mod = tbl->Modules;
+    for( i = 0; i < NvRmPrivModuleID_Num; i++ )
+    {
+        if( mod[i].Index == NVRM_MODULE_INVALID )
+        {
+            continue;
+        }
+
+        /* unmap all of the device instances */
+        inst = tbl->ModInst + mod[i].Index;
+        devid = inst->DeviceId;
+        while( devid == inst->DeviceId )
+        {
+            NvRmPhysicalMemUnmap( inst->VirtAddr, inst->Length );
+            inst++;
+        }
+    }
+}
+
+/**
+ * Returns the BCT customer-option word preserved by the boot loader in
+ * PMC scratch register 20, or 0 when running under simulation (where the
+ * register is not populated).
+ */
+NvU32
+NvRmPrivGetBctCustomerOption(NvRmDeviceHandle hRm)
+{
+    if (!NvRmIsSimulation())
+    {
+        return NV_REGR(hRm, NvRmModuleID_Pmif, 0, APBDEV_PMC_SCRATCH20_0);
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+/**
+ * Returns a pointer to the device's chip-identification record
+ * (populated by NvRmPrivReadChipId).
+ */
+NvRmChipId *
+NvRmPrivGetChipId(
+    NvRmDeviceHandle hDevice )
+{
+    return &hDevice->ChipId;
+}
+
+#if !NV_OAL
+/**
+ * "Basic" initialization: a superset of NvRmInit that additionally loads
+ * configuration, maps apertures, and initializes the key list -- but
+ * stops short of the full bring-up done by NvRmOpenNew. Idempotent via
+ * bBasicInit. On any failure *pHandle is left as 0; partial state is not
+ * unwound (NOTE(review): same no-cleanup caveat as NvRmOpenNew).
+ *
+ * @param pHandle Receives the static RM device handle, or 0 on failure.
+ */
+void NvRmBasicInit(NvRmDeviceHandle * pHandle)
+{
+    NvRmDevice *rm = 0;
+    NvError err;
+    NvU32 *table = 0;
+    NvU32 BctCustomerOption = 0;
+
+    *pHandle = 0;
+    rm = NvRmPrivGetRmDeviceHandle();
+
+    if( rm->bBasicInit )
+    {
+        *pHandle = rm;
+        return;
+    }
+
+    /* get the default configuration */
+    err = NvRmPrivGetDefaultCfg( g_CfgMap, &rm->cfg );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* get the requested configuration */
+    err = NvRmPrivReadCfgVars( g_CfgMap, &rm->cfg );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* Read the chip Id and store in the Rm structure. */
+    NvRmPrivReadChipId( rm );
+
+    // init the module control (relocation table, resets, etc.)
+    table = NvRmPrivGetRelocationTable( rm );
+    if( !table )
+    {
+        goto fail;
+    }
+
+    err = NvRmPrivModuleInit( &rm->ModuleTable, table );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    NvRmPrivMemoryInfo( rm );
+
+    // setup the hw apertures
+    err = NvRmPrivMapApertures( rm );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* Key list must be ready before ODM queries, which may call back into
+     * the RM (same ordering constraint as in NvRmOpenNew). */
+    BctCustomerOption = NvRmPrivGetBctCustomerOption(rm);
+    err = NvRmPrivInitKeyList(rm, &BctCustomerOption, 1);
+    if (err != NvSuccess)
+    {
+        goto fail;
+    }
+
+    // Now populate the logical interrupt table.
+    NvRmPrivInterruptTableInit( rm );
+
+    rm->bBasicInit = NV_TRUE;
+    // basic init is a super-set of preinit
+    rm->bPreInit = NV_TRUE;
+    *pHandle = rm;
+
+fail:
+    return;
+}
+
+/**
+ * Tears down the state created by NvRmBasicInit: key list, aperture
+ * mappings, and the module instance table. No-op on Windows x86
+ * simulation builds. NOTE(review): does not clear bBasicInit/bPreInit,
+ * so a subsequent NvRmBasicInit will return the stale handle without
+ * re-initializing -- confirm close is only called at final shutdown.
+ */
+void
+NvRmBasicClose(NvRmDeviceHandle handle)
+{
+    if (!NVOS_IS_WINDOWS_X86)
+    {
+        NvRmPrivDeInitKeyList(handle);
+        /* unmap the apertures */
+        NvRmPrivUnmapApertures( handle );
+        /* deallocate the instance table */
+        NvRmPrivModuleDeinit( &handle->ModuleTable );
+    }
+}
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt.c
new file mode 100644
index 000000000000..db6c1beb2d66
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_processor.h"
+#include "nvassert.h"
+#include "nvrm_relocation_table.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_drf.h"
+#include "nvrm_structure.h"
+#include "ap15rm_private.h"
+#include "ap15/arictlr.h"
+
+#define NVRM_ENABLE_PRINTF 0 // Module debug trace: 0=disable, 1=enable
+
+/* NVRM_INTERRUPT_PRINTF compiles to nothing unless both NV_DEBUG and
+ * NVRM_ENABLE_PRINTF are set.  The argument must be a parenthesized
+ * printf-style argument list, e.g. NVRM_INTERRUPT_PRINTF(("x=%d\n", x)). */
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_INTERRUPT_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_INTERRUPT_PRINTF(x)
+#endif
+
+//-----------------------------------------------------------------------------
+// Register access macros
+//-----------------------------------------------------------------------------
+
+/* Read/write an interrupt controller register by symbolic name (ICTLR_<reg>_0). */
+#define NV_INTR_REGR(rm,inst,reg) NV_REGR(rm, NvRmPrivModuleID_Interrupt, inst, ICTLR_##reg##_0)
+#define NV_INTR_REGW(rm,inst,reg,data) NV_REGW(rm, NvRmPrivModuleID_Interrupt, inst, ICTLR_##reg##_0, data)
+
+/* Virtual address of a register: aperture base of the given module
+ * instance plus byte offset. */
+#define NV_REGA(rm, aperture, instance, offset) \
+    ((volatile void*)((NvUPtr)(((rm)->ModuleTable.ModInst + (rm)->ModuleTable.Modules[(aperture)].Index + (instance))->VirtAddr) + (offset)))
+
+/* Read a controller register through a pre-computed base pointer. */
+#define NV_INTR_REG_READ(pIntr, reg) NV_READ32((((NvUPtr)(pIntr)) + ICTLR_##reg##_0))
+
+/* Interrupt decoder descriptors.  Initializer order is
+ * {ModuleID, SubIrqCount, ...}; the remaining zero fields (instance
+ * count, per-instance IRQ ranges) are filled in at init time by
+ * NvRmPrivMainControllerInit / NvRmPrivSubControllerInit.
+ * NOTE(review): these globals are non-static -- presumably referenced
+ * from other RM translation units; confirm before narrowing linkage. */
+
+/* AP15 primary interrupt controller: one bank of legacy ICTLR IRQs
+ * per instance. */
+NvRmIntrDecoder gs_Ap15PrimaryDecoder =
+    {NvRmPrivModuleID_Interrupt,
+    NVRM_IRQS_PER_INTR_CTLR, 0, {0}, {0}, {0} };
+
+/* AP20 primary interrupt controller: five times the per-controller IRQ
+ * count per instance. */
+NvRmIntrDecoder gs_Ap20PrimaryDecoder =
+    {NvRmPrivModuleID_ArmPerif,
+    NVRM_IRQS_PER_INTR_CTLR * 5, 0, {0}, {0}, {0} };
+
+/* Selected at runtime by NvRmPrivInterruptTableInit based on chip ID. */
+NvRmIntrDecoder *gs_PrimaryDecoder = &gs_Ap15PrimaryDecoder;
+
+/* Secondary (sub) interrupt controllers, each cascaded off a primary
+ * controller IRQ line. */
+NvRmIntrDecoder gs_SubDecoder[] =
+{
+    /* Secondary interrupt controller for APB DMA */
+    {NvRmPrivModuleID_ApbDma,
+    NVRM_MAX_DMA_CHANNELS, 0, {0}, {0}, {0}},
+
+    /* GPIO secondary interrupt controller */
+    {NvRmPrivModuleID_Gpio,
+    NVRM_IRQS_PER_GPIO_CTLR, 0, {0}, {0}, {0}},
+};
+
+
+
+/*
+ * Assigns a contiguous range of logical IRQ numbers, starting at Irq,
+ * to every instance of the sub-controller described by pDecoder, and
+ * records the range in both the decoder and each instance's IrqMap.
+ *
+ * Returns the first logical IRQ number past the assigned ranges, or 0
+ * (after asserting in debug builds) if the module has no instance.
+ *
+ * NOTE(review): the loop walks the instance table until DeviceId
+ * changes -- presumably the table is terminated by a differing entry;
+ * confirm against the relocation table builder.
+ */
+static NvU16
+NvRmPrivSubControllerInit(
+    NvRmDeviceHandle hRmDevice,
+    NvRmIntrDecoderHandle pDecoder,
+    NvU16 Irq)
+{
+    NvRmModuleInstance *inst; // Pointer to the module instance
+    NvU8 num; // Number of instances/loop index
+    NvU32 devid; // Hardware device id
+    NvError e;
+
+    NV_ASSERT( hRmDevice );
+
+    NV_CHECK_ERROR_CLEANUP( NvRmPrivGetModuleInstance( hRmDevice,
+        pDecoder->ModuleID, &inst) );
+    NV_ASSERT(inst != NULL);
+
+    num = 0;
+    devid = inst->DeviceId;
+    /* Get all the instances of that sub-controller */
+    while ( devid == inst->DeviceId )
+    {
+        NV_ASSERT( inst->IrqMap != NULL );
+        NV_ASSERT( num < NVRM_MAX_INSTANCES);
+
+        /* For modules which are sub-interrupt controllers, IRQ value in the
+         * IrqMap[0] represents the IRQ of the main interrupt controller. Sub
+         * IRQs for that controller can be computed from IndexMax and IndexBase
+         * members */
+        inst->IrqMap->IndexMax = pDecoder->SubIrqCount;
+        inst->IrqMap->IndexBase = Irq;
+
+        pDecoder->MainIrq[num] = inst->IrqMap->Irq[0];
+        pDecoder->SubIrqFirst[num] = Irq;
+        pDecoder->SubIrqLast[num] = Irq + pDecoder->SubIrqCount - 1;
+
+        /* Next instance gets the range immediately after this one. */
+        Irq += pDecoder->SubIrqCount;
+        inst++;
+        num++;
+    }
+    pDecoder->NumberOfInstances = num;
+
+    return Irq;
+fail:
+    NV_ASSERT(!"Invalid ModuleID or Instance in ap15rm_interrupt");
+    return 0;
+}
+
+/*
+ * Assigns logical IRQ numbers, starting at 0, to every instance of the
+ * primary interrupt controller described by pDecoder.  Primary IRQs
+ * have no parent line, so MainIrq[] is set to NVRM_IRQ_INVALID.
+ *
+ * Returns the first logical IRQ number past the primary ranges (i.e.
+ * where sub-decoder numbering should begin), or 0 after asserting if
+ * the module has no instance.
+ */
+static
+NvU16 NvRmPrivMainControllerInit(NvRmDeviceHandle hRmDevice,
+    NvRmIntrDecoder *pDecoder)
+{
+    NvRmModuleInstance *inst; // Pointer to the module instance
+    NvU32 num = 0;
+    NvU16 irq = 0; // Primary controller will start with IRQ 0.
+    NvU16 devid;
+    NvError e;
+
+    NV_CHECK_ERROR_CLEANUP(
+        NvRmPrivGetModuleInstance( hRmDevice, pDecoder->ModuleID, &inst)
+    );
+
+    NV_ASSERT(inst != NULL);
+    devid = inst->DeviceId;
+
+    /* Walk all instances of the controller (same DeviceId). */
+    while( devid == inst->DeviceId )
+    {
+        pDecoder->SubIrqFirst[num] = irq;
+        pDecoder->SubIrqLast[num] = irq + pDecoder->SubIrqCount - 1;
+        pDecoder->MainIrq[num] = NVRM_IRQ_INVALID;
+
+        irq += pDecoder->SubIrqCount;
+        num++;
+        inst++;
+    }
+
+    pDecoder->NumberOfInstances = num;
+    return irq;
+fail:
+    NV_ASSERT(!"Invalid ModuleID or Instance in ap15rm_interrupt");
+    return 0;
+}
+
+/*
+ * Builds the chip's logical IRQ numbering: the primary interrupt
+ * controller owns IRQs from zero, then each secondary (sub) decoder is
+ * assigned a contiguous range after it.  Also records the interrupt
+ * controller capabilities for the running chip and stores the total
+ * IRQ count in hRmDevice->MaxIrqs.
+ */
+void NvRmPrivInterruptTableInit( NvRmDeviceHandle hRmDevice )
+{
+    NvU16 NextIrq;
+    NvU32 i;
+    NvBool HasFalcon;
+
+    NV_ASSERT( hRmDevice );
+
+    NVRM_CAP_CLEAR(hRmDevice, NvRmCaps_HasFalconInterruptController);
+    NVRM_CAP_CLEAR(hRmDevice, NvRmCaps_Has128bitInterruptSerializer);
+
+    /* WARNING: the falcon interrupt controller is not in simulation! */
+    HasFalcon = (NvRmIsSimulation() == NV_FALSE) &&
+                (hRmDevice->ChipId.Id >= 0x20);
+    if (HasFalcon)
+    {
+        NVRM_CAP_SET(hRmDevice, NvRmCaps_HasFalconInterruptController);
+
+        if ((hRmDevice->ChipId.Major == 0) &&
+            (hRmDevice->ChipId.Netlist != 0) &&
+            (hRmDevice->ChipId.Minor != 0))
+        {
+            /* PALAU has 128-bit interrupt serializer and needs some WARs to
+             * compensate for the delays in interrupt arrival at interrupt
+             * controller */
+            NVRM_CAP_SET(hRmDevice, NvRmCaps_Has128bitInterruptSerializer);
+        }
+    }
+
+    gs_PrimaryDecoder = HasFalcon ? &gs_Ap20PrimaryDecoder
+                                  : &gs_Ap15PrimaryDecoder;
+
+    NextIrq = NvRmPrivMainControllerInit(hRmDevice, gs_PrimaryDecoder);
+
+    /* Number the sub-decoders, iterating the table back-to-front (the
+     * established numbering order). */
+    for (i = NV_ARRAY_SIZE(gs_SubDecoder); i-- != 0; )
+    {
+        NextIrq = NvRmPrivSubControllerInit(hRmDevice,
+                      &gs_SubDecoder[i], NextIrq);
+    }
+
+    hRmDevice->MaxIrqs = NextIrq;
+    NVRM_INTERRUPT_PRINTF(("MAX IRQs: %d\n", NextIrq));
+}
+
+/*
+ * Translates (ModuleID, Index) into a logical IRQ number.
+ *
+ * For modules wired to the main controller, Index selects an entry of
+ * the module's IrqMap directly.  For modules behind a secondary
+ * decoder (IndexMax != 0), the IRQ is IndexBase + Index; the special
+ * Index value 0xFF returns the decoder's main IRQ line instead (a hack
+ * used by the OAL to enumerate and guard the cascade lines).
+ *
+ * Returns NVRM_IRQ_INVALID for modules without an IrqMap: some clients
+ * (e.g. NVBL) call this blindly without knowing whether the module
+ * supports interrupts, so this is tolerated rather than asserted.
+ * Returns 0 (after asserting in debug builds) for an invalid module.
+ */
+NvU32 NvRmGetIrqForLogicalInterrupt(
+    NvRmDeviceHandle hRmDevice,
+    NvRmModuleID ModuleID,
+    NvU32 Index)
+{
+    NvRmModuleInstance* inst = NULL; // Pointer to module instance
+    NvRmModuleIrqMap* pIrqMap; // Pointer to module IRQ map
+    NvU16 irq = 0;
+    NvError e;
+    NV_ASSERT( hRmDevice );
+
+
+    NV_CHECK_ERROR_CLEANUP( NvRmPrivGetModuleInstance( hRmDevice,
+        ModuleID, &inst) );
+    if ( inst == NULL || inst->IrqMap == NULL)
+    {
+        /* Tolerated: see function comment. */
+        return NVRM_IRQ_INVALID;
+    }
+
+    pIrqMap = inst->IrqMap;
+
+    /* Check if this the interrupt for this module is routed to secondary
+     * interrupt controller or to the main controller */
+    /* FIXME rename IndexMax and IndexBase variables to SubInterruptCount and
+     * SubInterruptBase */
+    if (pIrqMap->IndexMax == 0)
+    {
+        NV_ASSERT (Index < pIrqMap->IrqCount);
+        NV_ASSERT(pIrqMap->Irq[Index] != NVRM_IRQ_INVALID);
+
+        irq = pIrqMap->Irq[Index];
+    }
+    /* Secondary interrupt controller */
+    else
+    {
+        // Requesting controller's main interrupt? This is a hack used by the
+        // OAL to get the main IRQ line for the sub-decoders. OAL builds a list
+        // of all the main IRQs for the sub-decoders and asserts if someone
+        // tries to register an interrupt handler for the main IRQ line.
+        if (Index == 0xFF)
+        {
+            NV_ASSERT (pIrqMap->Irq[0] != NVRM_IRQ_INVALID);
+            irq = pIrqMap->Irq[0];
+        } else
+        {
+            /* Index cannot be more than the Max IRQs registered by that
+             * secondary interrupt controller */
+            NV_ASSERT( Index < pIrqMap->IndexMax );
+            irq = pIrqMap->IndexBase + Index;
+        }
+    }
+    return irq;
+fail:
+    NV_ASSERT(!"Invalid ModuleID or Instance in ap15rm_interrupt");
+    return 0;
+}
+
+/*
+ * Returns the number of IRQ lines exposed by the given module, or 0 if
+ * the module has no IRQ map.  Some clients (e.g. NVBL) probe modules
+ * blindly without knowing whether they support interrupts, so a
+ * missing map is reported as zero rather than asserted.
+ */
+NvU32 NvRmGetIrqCountForLogicalInterrupt(
+    NvRmDeviceHandle hRmDevice,
+    NvRmModuleID ModuleID)
+{
+    NvRmModuleInstance *pInst = NULL;
+    NvError e;
+
+    NV_ASSERT(hRmDevice);
+
+    NV_CHECK_ERROR_CLEANUP(
+        NvRmPrivGetModuleInstance(hRmDevice, ModuleID, &pInst));
+
+    if ((pInst == NULL) || (pInst->IrqMap == NULL))
+    {
+        /* Module present but interrupt-less: see function comment. */
+        return 0;
+    }
+
+    return pInst->IrqMap->IrqCount;
+fail:
+    NV_ASSERT(!"Invalid ModuleID or Instance in ap15rm_interrupt");
+    return 0;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt_generic.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt_generic.c
new file mode 100644
index 000000000000..98c96ef08390
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_interrupt_generic.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvos.h"
+#include "ap15rm_private.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_chiplib.h"
+#include "nvintr.h"
+
+void NvRmPrivChiplibInterruptHandler( void );
+
+/*
+ * Registers handlers for a list of logical IRQs by forwarding directly
+ * to the OS interrupt layer.  The RM device handle is unused in this
+ * (generic) build.  Returns the OS layer's error code.
+ */
+NvError NvRmInterruptRegister(
+    NvRmDeviceHandle hRmDevice,
+    NvU32 IrqListSize,
+    const NvU32 *pIrqList,
+    const NvOsInterruptHandler *pIrqHandlerList,
+    void *context,
+    NvOsInterruptHandle *handle,
+    NvBool InterruptEnable)
+{
+    /* Pure pass-through: the OS layer owns interrupt registration. */
+    return NvOsInterruptRegister(IrqListSize, pIrqList, pIrqHandlerList,
+               context, handle, InterruptEnable);
+}
+
+/* Unregisters a previously registered interrupt handle.  Thin
+ * pass-through to the OS layer; the RM device handle is unused. */
+void NvRmInterruptUnregister(
+    NvRmDeviceHandle hRmDevice,
+    NvOsInterruptHandle handle)
+{
+    NvOsInterruptUnregister( handle );
+}
+
+/* Enables delivery on a registered interrupt handle.  Thin
+ * pass-through to the OS layer; the RM device handle is unused. */
+NvError NvRmInterruptEnable(
+    NvRmDeviceHandle hRmDevice,
+    NvOsInterruptHandle handle)
+{
+    return NvOsInterruptEnable(handle);
+}
+
+/* Signals end-of-interrupt so the line can fire again.  Thin
+ * pass-through to the OS layer. */
+void NvRmInterruptDone( NvOsInterruptHandle handle )
+{
+    NvOsInterruptDone( handle );
+}
+
+/* There is no chiplib interrupt handler for wince -- intentional no-op
+ * stub so the common code can always call it. */
+void NvRmPrivChiplibInterruptHandler( void )
+{
+    return;
+}
+
+/* No-op stub: this build needs no extra work to start interrupt
+ * dispatch (the OS layer handles it). */
+void NvRmPrivInterruptStart(NvRmDeviceHandle hRmDevice)
+{
+    return;
+}
+
+/* No-op stub: nothing to tear down for interrupts in this build. */
+void NvRmPrivInterruptShutdown(NvRmDeviceHandle handle)
+{
+    return;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_memctrl.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_memctrl.c
new file mode 100644
index 000000000000..944950428d00
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_memctrl.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_init.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "ap15/aremc.h"
+#include "ap15/armc.h"
+#include "ap15/arapb_misc.h"
+#include "ap15rm_private.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_memctrl.h"
+#include "nvrm_clocks.h"
+#include "nvrm_structure.h"
+#include "nvrm_arm_cp.h"
+#include "nvrm_processor.h"
+
+/* Maps an RM module ID to the OBS-bus partition select value used when
+ * sampling that module's observability signals. */
+typedef struct ObsInfoRec
+{
+    NvRmModuleID modSelect;   // module whose signals are observed
+    NvU32 partSelect;         // APB_MISC_GP_OBSCTRL_0 OBS_PART_SEL value
+} ObsInfo;
+
+/* Builds one table entry from a module ID suffix and a partition
+ * name suffix. */
+#define OBS_INFO_FIELD(modID, partition) \
+    { \
+        NvRmModuleID_##modID, \
+        APB_MISC_GP_OBSCTRL_0_OBS_PART_SEL_##partition \
+    }
+
+// static table correspond to enum NvRmModuleID in \include\nvrm_module.idl
+// Expand this table to add more moduleID - partition map entries.
+static const ObsInfo ObsInfoTable[] =
+{
+    OBS_INFO_FIELD(Cpu, CPU),
+    OBS_INFO_FIELD(Display, DIS),
+    OBS_INFO_FIELD(Csi, DIS),
+    OBS_INFO_FIELD(Hdmi, DIS),
+    OBS_INFO_FIELD(Tvo, DIS),
+    OBS_INFO_FIELD(Dsi, DIS),
+    OBS_INFO_FIELD(2D, GR),
+    OBS_INFO_FIELD(Fuse, GR),
+    OBS_INFO_FIELD(Vde, VDE),
+    OBS_INFO_FIELD(Isp, VE)
+};
+
+/* Entry count of ObsInfoTable (loop bound for lookups). */
+static const NvU32 ObsInfoTableSize =
+    NV_ARRAY_SIZE(ObsInfoTable);
+
+
+/*
+ * Arms the EMC/MC bandwidth statistics counters:
+ *  - two MC counters filtered to client_id_0 / client_id_1 (pass
+ *    0xffffffff to disable a client filter and count everything), and
+ *  - one EMC low-latency-client counter (llc_client_id == 1 selects
+ *    MPCore reads, anything else the default CMC reads).
+ * Counters are cleared and gathering enabled; read results back with
+ * McStat_Stop.  The register write ordering (disable, program, clear,
+ * enable) follows the gather protocol -- do not reorder.
+ */
+void
+McStat_Start(
+    NvRmDeviceHandle rm,
+    NvU32 client_id_0,
+    NvU32 client_id_1,
+    NvU32 llc_client_id)
+{
+    NvU32 emc_ctrl =
+        (AREMC_STAT_CONTROL_MODE_BANDWIDTH << AREMC_STAT_CONTROL_MODE_SHIFT) |
+        (AREMC_STAT_CONTROL_EVENT_QUALIFIED << AREMC_STAT_CONTROL_EVENT_SHIFT) |
+        (AREMC_STAT_CONTROL_CLIENT_TYPE_CMCR <<
+         AREMC_STAT_CONTROL_CLIENT_TYPE_SHIFT) | // default is CMC Read client
+        (AREMC_STAT_CONTROL_FILTER_CLIENT_ENABLE <<
+         AREMC_STAT_CONTROL_FILTER_CLIENT_SHIFT) |
+        (AREMC_STAT_CONTROL_FILTER_ADDR_DISABLE <<
+         AREMC_STAT_CONTROL_FILTER_ADDR_SHIFT);
+
+    NvU32 mc_filter_client_0 = (ARMC_STAT_CONTROL_FILTER_CLIENT_ENABLE <<
+        ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+
+    NvU32 mc_filter_client_1 = (ARMC_STAT_CONTROL_FILTER_CLIENT_ENABLE <<
+        ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+
+    /* 0xffffffff means "no client filter": count all clients. */
+    if (client_id_0 == 0xffffffff)
+    {
+        mc_filter_client_0 = (ARMC_STAT_CONTROL_FILTER_CLIENT_DISABLE <<
+            ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+        client_id_0 = 0;
+    }
+
+    if (client_id_1 == 0xffffffff)
+    {
+        mc_filter_client_1 = (ARMC_STAT_CONTROL_FILTER_CLIENT_DISABLE <<
+            ARMC_STAT_CONTROL_FILTER_CLIENT_SHIFT);
+        client_id_1 = 0;
+    }
+
+    if(llc_client_id == 1)
+        emc_ctrl |= AREMC_STAT_CONTROL_CLIENT_TYPE_MPCORER <<
+            AREMC_STAT_CONTROL_CLIENT_TYPE_SHIFT;
+    // overwrite with MPCore read
+
+    /* EMC LLC counter: disable gather, program, clear, then enable. */
+    NV_REGW(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_CONTROL_0,
+        NV_DRF_DEF(EMC, STAT_CONTROL, LLMC_GATHER,DISABLE));
+    NV_REGW(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_LLMC_CLOCK_LIMIT_0, 0xffffffff);
+    NV_REGW(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_LLMC_CONTROL_0_0, emc_ctrl);
+    NV_REGW(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_CONTROL_0,
+        NV_DRF_DEF(EMC, STAT_CONTROL, LLMC_GATHER, CLEAR));
+    NV_REGW(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_CONTROL_0,
+        NV_DRF_DEF(EMC, STAT_CONTROL, LLMC_GATHER, ENABLE));
+
+    /* MC counters: same disable/program/clear/enable sequence. */
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_CONTROL_0,
+        NV_DRF_DEF(MC, STAT_CONTROL, EMC_GATHER, DISABLE));
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_CLOCK_LIMIT_0, 0xffffffff);
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_CONTROL_0_0,
+        (ARMC_STAT_CONTROL_MODE_BANDWIDTH <<
+         ARMC_STAT_CONTROL_MODE_SHIFT) |
+        (client_id_0 << ARMC_STAT_CONTROL_CLIENT_ID_SHIFT) |
+        (ARMC_STAT_CONTROL_EVENT_QUALIFIED <<
+         ARMC_STAT_CONTROL_EVENT_SHIFT) |
+        mc_filter_client_0 |
+        (ARMC_STAT_CONTROL_FILTER_ADDR_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_ADDR_SHIFT) |
+        (ARMC_STAT_CONTROL_FILTER_PRI_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_PRI_SHIFT) |
+        (ARMC_STAT_CONTROL_FILTER_COALESCED_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_COALESCED_SHIFT));
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_CONTROL_1_0,
+        (ARMC_STAT_CONTROL_MODE_BANDWIDTH <<
+         ARMC_STAT_CONTROL_MODE_SHIFT) |
+        (client_id_1 << ARMC_STAT_CONTROL_CLIENT_ID_SHIFT) |
+        (ARMC_STAT_CONTROL_EVENT_QUALIFIED <<
+         ARMC_STAT_CONTROL_EVENT_SHIFT) |
+        mc_filter_client_1 |
+        (ARMC_STAT_CONTROL_FILTER_ADDR_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_ADDR_SHIFT) |
+        (ARMC_STAT_CONTROL_FILTER_PRI_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_PRI_SHIFT) |
+        (ARMC_STAT_CONTROL_FILTER_COALESCED_DISABLE <<
+         ARMC_STAT_CONTROL_FILTER_COALESCED_SHIFT));
+
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_CONTROL_0,
+        NV_DRF_DEF(MC, STAT_CONTROL, EMC_GATHER, CLEAR));
+    NV_REGW(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_CONTROL_0,
+        NV_DRF_DEF(MC, STAT_CONTROL, EMC_GATHER, ENABLE));
+}
+
+/*
+ * Reads back the counters armed by McStat_Start.  Out-parameters:
+ * cycle counts for the two MC clients, cycle and clock counts for the
+ * EMC low-latency client, and the MC clock total used as the time base.
+ * Does not stop or clear the hardware counters.
+ */
+void
+McStat_Stop(
+    NvRmDeviceHandle rm,
+    NvU32 *client_0_cycles,
+    NvU32 *client_1_cycles,
+    NvU32 *llc_client_cycles,
+    NvU32 *llc_client_clocks,
+    NvU32 *mc_clocks)
+{
+    *llc_client_cycles = NV_REGR(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_LLMC_COUNT_0_0);
+    *llc_client_clocks = NV_REGR(rm, NvRmPrivModuleID_ExternalMemoryController,
+        0, EMC_STAT_LLMC_CLOCKS_0);
+    *client_0_cycles = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_COUNT_0_0);
+    *client_1_cycles = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_COUNT_1_0);
+    *mc_clocks = NV_REGR(rm, NvRmPrivModuleID_MemoryController,
+        0, MC_STAT_EMC_CLOCKS_0);
+}
+
+/*
+ * Prints the counter values collected by McStat_Stop to the debug
+ * console, each in both hex and decimal.  Pure formatting; reads no
+ * hardware.
+ */
+void
+McStat_Report(
+    NvU32 client_id_0,
+    NvU32 client_0_cycles,
+    NvU32 client_id_1,
+    NvU32 client_1_cycles,
+    NvU32 llc_client_id,
+    NvU32 llc_client_clocks,
+    NvU32 llc_client_cycles,
+    NvU32 mc_clocks)
+{
+    NvOsDebugPrintf("LLC Client %d Count: 0x%.8X, %u\n",
+        llc_client_id, llc_client_cycles, llc_client_cycles);
+    NvOsDebugPrintf("LLC Client %d Clocks: 0x%.8X, %u\n",
+        llc_client_id, llc_client_clocks, llc_client_clocks);
+    NvOsDebugPrintf("Client %.3d Count: 0x%.8X, %u\n",
+        client_id_0, client_0_cycles, client_0_cycles);
+    NvOsDebugPrintf("Client %.3d Count: 0x%.8X, %u\n",
+        client_id_1, client_1_cycles, client_1_cycles);
+    NvOsDebugPrintf("Total MC Clocks: 0x%.8X, %u\n", mc_clocks, mc_clocks);
+}
+
+/*
+ * Reads `length` observability (OBS) bus signals for the given module,
+ * starting at signal index `start_index`, into `value[]`.  The OBS
+ * partition is looked up for modID in ObsInfoTable; returns
+ * NvError_BadParameter if the module is not in the table, NvSuccess
+ * otherwise.  Each sample is also printed to the debug console.
+ */
+NvError
+ReadObsData(
+    NvRmDeviceHandle rm,
+    NvRmModuleID modID,
+    NvU32 start_index,
+    NvU32 length,
+    NvU32 *value)
+{
+    NvU32 i = 0, offset = 0, value1, value2;
+    NvU32 timeout;
+    NvU32 partID = 0xffffffff;
+    NvU32 index, temp;
+
+    /* Map the module to its OBS partition select value. */
+    for (i = 0; i < ObsInfoTableSize; i++)
+    {
+        if (modID == ObsInfoTable[i].modSelect)
+        {
+            partID = ObsInfoTable[i].partSelect;
+            break;
+        }
+    }
+    if (i == ObsInfoTableSize)
+    {
+        return NvError_BadParameter;
+    }
+
+    for(offset = 0; offset < length; offset++)
+    {
+        index = start_index + offset;
+        /* Select the signal: enable OBS and program module/partition/
+         * signal in the control register. */
+        temp = NV_DRF_DEF(APB_MISC_GP, OBSCTRL, OBS_EN, ENABLE) |
+               NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_MOD_SEL, modID) |
+               NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_PART_SEL, partID) |
+               NV_DRF_NUM(APB_MISC_GP, OBSCTRL, OBS_SIG_SEL, index) ;
+        NV_REGW(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0, temp);
+        value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSCTRL_0);
+        /* Re-read OBSDATA until two consecutive reads agree (signal has
+         * settled) or the retry budget runs out; the initial OBSCTRL
+         * read-back primes the comparison. */
+        timeout = 100;
+        do {
+            value2 = value1;
+            value1 = NV_REGR(rm, NvRmModuleID_Misc, 0, APB_MISC_GP_OBSDATA_0);
+            timeout --;
+        } while (value1 != value2 && timeout);
+        NvOsDebugPrintf("OBS bus modID 0x%x index 0x%x = value 0x%x",
+            modID, index, value1);
+        value[offset] = value1;
+    }
+    return NvSuccess;
+}
+
+/******************************************************************************/
+
+/* The AP15 core exposes two event counters (plus the cycle counter). */
+#define NVRM_AP15_MONITORED_EVENTS_MAX (2)
+
+// AP15 CP15 performance monitor control register (PMNC) bit-field layout,
+// expressed as high:low ranges for the NV_DRF_* macros.
+#define AP15_CP15_PMNC_0_ENABLE_RANGE 0:0
+#define AP15_CP15_PMNC_0_EVENT_CNTS_RESET_RANGE 1:1
+#define AP15_CP15_PMNC_0_CYCLE_CNT_RESET_RANGE 2:2
+#define AP15_CP15_PMNC_0_EVENT0_CNT_OV_RANGE 8:8
+#define AP15_CP15_PMNC_0_EVENT1_CNT_OV_RANGE 9:9
+#define AP15_CP15_PMNC_0_CYCLE_CNT_OV_RANGE 10:10
+#define AP15_CP15_PMNC_0_EVENT0_RANGE 19:12
+#define AP15_CP15_PMNC_0_EVENT1_RANGE 27:20
+
+/* Disables all core performance counters by clearing the PMNC ENABLE
+ * bit (writes CP15 c15/c12 opcode2=0). */
+static void Ap15CorePerfMonDisable(void)
+{
+    // Disable all performance counters
+    NvU32 RegValue = NV_DRF_NUM(AP15_CP15, PMNC, ENABLE, 0);
+    MCR(p15, 0, RegValue, c15, c12, 0);
+}
+
+/* Returns NvSuccess while the performance monitors are enabled and no
+ * counter (cycle counter or either event counter) has overflowed;
+ * NvError_InvalidState otherwise. */
+static NvError Ap15CorePerfMonCheckStatus(void)
+{
+    NvU32 Pmnc;
+
+    MRC(p15, 0, Pmnc, c15, c12, 0);
+
+    if (NV_DRF_VAL(AP15_CP15, PMNC, ENABLE, Pmnc) == 0)
+        return NvError_InvalidState;
+    if (NV_DRF_VAL(AP15_CP15, PMNC, CYCLE_CNT_OV, Pmnc) == 1)
+        return NvError_InvalidState;
+    if (NV_DRF_VAL(AP15_CP15, PMNC, EVENT0_CNT_OV, Pmnc) == 1)
+        return NvError_InvalidState;
+    if (NV_DRF_VAL(AP15_CP15, PMNC, EVENT1_CNT_OV, Pmnc) == 1)
+        return NvError_InvalidState;
+
+    return NvSuccess;
+}
+
+/*
+ * Programs and starts the AP15 core performance monitors.  On entry
+ * *pEventListSize is the number of event IDs in pEventList; on exit it
+ * is the number actually monitored (at most 2).  Called with
+ * *pEventListSize == 0 it only reports the hardware maximum and
+ * touches no registers.
+ */
+static void Ap15CorePerfMonStart(NvU32* pEventList, NvU32* pEventListSize)
+{
+    NvU32 Pmnc;
+    NvU32 EvtA, EvtB;
+
+    /* Query-only call: report how many events can be monitored. */
+    if (*pEventListSize == 0)
+    {
+        *pEventListSize = NVRM_AP15_MONITORED_EVENTS_MAX;
+        return;
+    }
+
+    /* A single supplied event is tracked by both counters. */
+    EvtA = pEventList[0];
+    EvtB = EvtA;
+    if (*pEventListSize >= NVRM_AP15_MONITORED_EVENTS_MAX)
+    {
+        EvtB = pEventList[1];
+        *pEventListSize = NVRM_AP15_MONITORED_EVENTS_MAX;
+    }
+
+    /* Reset the counters, clear (write-1) the overflow flags, select
+     * the two events, and enable the cycle counter plus both event
+     * counters in one PMNC write. */
+    Pmnc =
+        NV_DRF_NUM(AP15_CP15, PMNC, ENABLE, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, EVENT_CNTS_RESET, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, CYCLE_CNT_RESET, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, CYCLE_CNT_OV, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, EVENT0_CNT_OV, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, EVENT1_CNT_OV, 1) |
+        NV_DRF_NUM(AP15_CP15, PMNC, EVENT0, EvtA) |
+        NV_DRF_NUM(AP15_CP15, PMNC, EVENT1, EvtB);
+    MCR(p15, 0, Pmnc, c15, c12, 0);
+}
+
+/*
+ * Stops the AP15 core performance monitors and returns the counts.
+ * *pTotalCycleCount always receives the cycle counter; event counts
+ * are copied into pCountList as far as *pCountListSize allows, and
+ * *pCountListSize is clamped to the number returned.  Fails with
+ * NvError_InvalidState if the monitors were not running or a counter
+ * overflowed (results would be unreliable).
+ */
+static NvError Ap15CorePerfMonStop(
+    NvU32* pCountListSize,
+    NvU32* pCountList,
+    NvU32* pTotalCycleCount)
+{
+    NvU32 ccnt, pmn0, pmn1;
+
+    // Check status first, then disable monitors
+    NvError err = Ap15CorePerfMonCheckStatus();
+    Ap15CorePerfMonDisable();
+    if (err != NvSuccess)
+        return err;
+
+    // Read back cycle and event counters
+    MRC(p15, 0, ccnt, c15, c12, 1);
+    MRC(p15, 0, pmn0, c15, c12, 2);
+    MRC(p15, 0, pmn1, c15, c12, 3);
+
+    // Return total cycle count always, and event counts depending on
+    // the room provided by the caller
+    *pTotalCycleCount = ccnt;
+    if (*pCountListSize == 0)
+        return NvSuccess;
+
+    pCountList[0] = pmn1; // ARM spec Event0 <=> Counter 1 (not a typo)
+    if (*pCountListSize >= NVRM_AP15_MONITORED_EVENTS_MAX)
+    {
+        pCountList[1] = pmn0; // ARM spec Event1 <=> Counter 0 (not a typo)
+        *pCountListSize = NVRM_AP15_MONITORED_EVENTS_MAX;
+    }
+    return NvSuccess;
+}
+
+/*
+ * Public entry point: starts core performance monitoring for the
+ * running chip.  See Ap15CorePerfMonStart for the pEventList /
+ * pEventListSize contract.  Monitoring is supported only on real
+ * silicon (SoC) and only from a privileged CPU mode; AP20 is not
+ * supported.
+ */
+NvError
+NvRmCorePerfMonStart(
+    NvRmDeviceHandle hRmDevice,
+    NvU32* pEventListSize,
+    NvU32* pEventList)
+{
+    NvU32 psr;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pEventListSize);
+    NV_ASSERT ((*pEventListSize == 0) || pEventList);
+
+    /* Counters are privileged-mode, SoC-only resources. */
+    GET_CPSR(psr);
+    if (IS_USER_MODE(psr) ||
+        (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc))
+        return NvError_NotSupported;
+
+    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+    {
+        Ap15CorePerfMonStart(pEventList, pEventListSize);
+        return NvSuccess;
+    }
+    if (hRmDevice->ChipId.Id == 0x20)
+        return NvError_NotSupported;
+
+    NV_ASSERT(!"Invalid chip ID");
+    return NvError_NotSupported;
+}
+
+/*
+ * Public entry point: stops core performance monitoring and returns
+ * the collected counts.  See Ap15CorePerfMonStop for the pCountList /
+ * pCountListSize / pTotalCycleCount contract.  Supported only on real
+ * silicon (SoC) from a privileged CPU mode; AP20 is not supported.
+ */
+NvError
+NvRmCorePerfMonStop(
+    NvRmDeviceHandle hRmDevice,
+    NvU32* pCountListSize,
+    NvU32* pCountList,
+    NvU32* pTotalCycleCount)
+{
+    NvU32 psr;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pTotalCycleCount);
+    NV_ASSERT(pCountListSize);
+    NV_ASSERT ((*pCountListSize == 0) || pCountList);
+
+    /* Counters are privileged-mode, SoC-only resources. */
+    GET_CPSR(psr);
+    if (IS_USER_MODE(psr) ||
+        (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc))
+        return NvError_NotSupported;
+
+    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+        return Ap15CorePerfMonStop(
+            pCountListSize, pCountList, pTotalCycleCount);
+    if (hRmDevice->ChipId.Id == 0x20)
+        return NvError_NotSupported;
+
+    NV_ASSERT(!"Invalid chip ID");
+    return NvError_NotSupported;
+}
+
+/* Handle of the registered MC decode-error interrupt (NULL when the
+ * monitor is not installed). */
+static NvOsInterruptHandle s_McInterruptHandle = NULL;
+
+/*
+ * Interrupt handler for memory controller decode errors.  Logs the
+ * faulting address/status for each pending error source (AXI, external
+ * memory, GART), then clears the handled status bits and completes the
+ * interrupt.  Note the NV_ASSERT fires unconditionally: in debug
+ * builds any MC decode error is treated as fatal.
+ */
+static void McErrorIntHandler(void* args)
+{
+    NvU32 RegVal;
+    NvU32 IntStatus;
+    NvU32 IntClear = 0;
+    NvRmDeviceHandle hRm = (NvRmDeviceHandle)args;
+
+    IntStatus = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0);
+    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_AXI_INT, IntStatus) )
+    {
+        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_AXI_INT, SET);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_DECERR_AXI_ADR_0);
+        NvOsDebugPrintf("AXI DecErrAddress=0x%x ", RegVal);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_DECERR_AXI_STATUS_0);
+        NvOsDebugPrintf("AXI DecErrStatus=0x%x ", RegVal);
+    }
+    if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, IntStatus) )
+    {
+        IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, SET);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_DECERR_EMEM_OTHERS_ADR_0);
+        NvOsDebugPrintf("EMEM DecErrAddress=0x%x ", RegVal);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_DECERR_EMEM_OTHERS_STATUS_0);
+        NvOsDebugPrintf("EMEM DecErrStatus=0x%x ", RegVal);
+    }
+    if ( NV_DRF_VAL(MC, INTSTATUS, INVALID_GART_PAGE_INT, IntStatus) )
+    {
+        IntClear |= NV_DRF_DEF(MC, INTSTATUS, INVALID_GART_PAGE_INT, SET);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_GART_ERROR_ADDR_0);
+        NvOsDebugPrintf("GART DecErrAddress=0x%x ", RegVal);
+        RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+            MC_GART_ERROR_REQ_0);
+        NvOsDebugPrintf("GART DecErrStatus=0x%x ", RegVal);
+    }
+
+    NV_ASSERT(!"MC Decode Error ");
+    // Clear the interrupt.
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0, IntClear);
+    NvRmInterruptDone(s_McInterruptHandle);
+}
+
+/*
+ * Installs the MC decode-error interrupt handler (once; subsequent
+ * calls are no-ops while the handler is registered) and unmasks the
+ * AXI, external-memory and GART decode-error interrupts in the memory
+ * controller.  Returns the registration error, or NvSuccess.
+ */
+NvError NvRmPrivAp15McErrorMonitorStart(NvRmDeviceHandle hRm)
+{
+    NvU32 val;
+    NvU32 IrqList;
+    NvError e = NvSuccess;
+    NvOsInterruptHandler handler;
+
+    if (s_McInterruptHandle == NULL)
+    {
+        // Install an interrupt handler.
+        handler = McErrorIntHandler;
+        IrqList = NvRmGetIrqForLogicalInterrupt(hRm,
+            NvRmPrivModuleID_MemoryController, 0);
+        NV_CHECK_ERROR( NvRmInterruptRegister(hRm, 1, &IrqList, &handler,
+            hRm, &s_McInterruptHandle, NV_TRUE) );
+        // Enable Dec Err interrupts in memory Controller.
+        val = NV_DRF_DEF(MC, INTMASK, DECERR_AXI_INTMASK, UNMASKED) |
+              NV_DRF_DEF(MC, INTMASK, DECERR_EMEM_OTHERS_INTMASK, UNMASKED) |
+              NV_DRF_DEF(MC, INTMASK, INVALID_GART_PAGE_INTMASK, UNMASKED);
+        NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTMASK_0, val);
+    }
+    return e;
+}
+
+/* Unregisters the MC decode-error interrupt handler installed by
+ * NvRmPrivAp15McErrorMonitorStart and forgets the handle.
+ * NOTE(review): if called while no handler is installed this passes a
+ * NULL handle to NvRmInterruptUnregister -- presumably tolerated by
+ * the OS layer; confirm. */
+void NvRmPrivAp15McErrorMonitorStop(NvRmDeviceHandle hRm)
+{
+    NvRmInterruptUnregister(hRm, s_McInterruptHandle);
+    s_McInterruptHandle = NULL;
+}
+
+/* This function sets some performance timings for Mc & Emc.  The
+ * register values below come from the Arch team; treat them as tuned
+ * constants, not derived numbers.
+ */
+void NvRmPrivAp15SetupMc(NvRmDeviceHandle hRm)
+{
+    NvU32 reg, mask;
+
+    /* Sanity-check that both low-latency client paths (CMC read and
+     * MPCore read) are enabled; this code only tunes, it cannot enable
+     * them itself. */
+    reg = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+        MC_LOWLATENCY_CONFIG_0);
+    mask = NV_DRF_DEF(MC, LOWLATENCY_CONFIG, CMCR_LL_CTRL, ENABLE) |
+           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, CMCR_LL_SEND_BOTH, ENABLE) |
+           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_CTRL, ENABLE) |
+           NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_SEND_BOTH, ENABLE);
+    if ( mask != (reg & mask) )
+        NV_ASSERT(!"MC LL Path not enabled!");
+
+    /* 1) TIMEOUT value for VDE is 256 cycles, 3D, 2D timeouts are disabled, all others 512 cycles. */
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_CTRL_0, 0x00000028);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_CMC_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_DC_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_DCB_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_EPP_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_G2_0, 0x0);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_HC_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_ISP_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPCORE_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEA_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEB_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_MPEC_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_NV_0, 0x0);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_PPCS_0, 0x88888888);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_VDE_0, 0x44444444);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT1_VDE_0, 0x44444444);
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_TIMEOUT_VI_0, 0x88888888);
+
+    /* 2) Command Queue values should be 2,2,6 for better performance. */
+    NV_REGW(hRm, NvRmPrivModuleID_ExternalMemoryController, 0, EMC_CMDQ_0, 0x00002206);
+
+    /* 3) MC_EMEM_ARB_CFG0_0 Should have optimal values for 166Mhz DRAM.
+     *    27:22 EMEM_BANKCNT_NSP_TH (0xC seems to be better for 166Mhz)
+     *    21:16 EMEM_BANKCNT_TH     (0x8 seems to be better for 166Mhz)
+     *
+     *    MC_EMEM_ARB_CFG0_0 <= 0x0308_1010
+     */
+    NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_EMEM_ARB_CFG0_0, 0x03081010);
+}
+
+/******************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pinmux_tables.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pinmux_tables.c
new file mode 100644
index 000000000000..7367ed143c8f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pinmux_tables.c
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "ap15rm_private.h"
+#include "ap15/arapb_misc.h"
+#include "ap15/arclk_rst.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_clocks.h"
+
+/**
+ * Each of the pin mux configurations defined in the pin mux spreadsheet are
+ * stored in tables below. For each configuration, every pad group that
+ * must be programmed is stored as a single 32b entry, where the register
+ * offset (for both the tristate and pin mux control registers), field bit
+ * position (ditto), pin mux mask, and new pin mux state are programmed.
+ *
+ * Furthermore, a simple state machine is implemented, so that pin mux
+ * registers can be "unprogrammed," in order to disown pad groups which
+ * may be pointing to a controller which is about to be programmed. The
+ * state machine also has no-op states which indicate when all necessary
+ * register programming for a configuration is complete, as well as when the
+ * last configuration for a module instance has been reached.
+ *
+ * Each module instance array has a reserved "reset" configuration at index
+ * zero. This special configuration is used in order to disown all pad
+ * groups whose reset state refers to the module instance. When a module
+ * instance configuration is to be applied, the reset configuration will
+ * first be applied, to ensure that no conflicts will arise between register
+ * reset values and the new configuration, followed by the application of
+ * the requested configuration.
+ *
+ * Furthermore, for controllers which support dynamic pinmuxing (i.e.,
+ * the "Multiplexed" pin map option), the last table entry is reserved for
+ * a "global unset," which will ensure that all configurations are disowned.
+ * This Multiplexed configuration should be applied before transitioning
+ * from one configuration to a second one.
+ *
+ * The table data has been packed into a single 32b entry to minimize code
+ * footprint using macros similar to the hardware register definitions, so
+ * that all of the shift and mask operations can be performed with the DRF
+ * macros.
+ */
+
+/* Below are the tables for all of the pin mux configurations for each
+ * controller. The first (zero-index) entry in each table is a "reset"
+ * configuration. This is used to disown all pads whose reset state
+ * corresponds to the controller function. When a new configuration is
+ * applied, the driver will first apply the reset configuration to ensure
+ * that no conflicts will occur due to identical signals being routed to
+ * multiple pad groups.
+ */
+
+ const NvU32 g_Ap15MuxI2c1[] = {
+ // Reset config -- disown GEN1_I2C pads
+ UNCONFIG(A, RM,I2C, RSVD1), CONFIGEND(),
+ // I2C1, Config 1 (GEN1_I2C pads)
+ CONFIG(A,A,RM,I2C), CONFIGEND(),
+ // I2C1, Config 2 (SPDIF pads) -- disown GEN1_I2C pads
+ CONFIG(B,D,SPDO,I2C), CONFIG(B,D,SPDI,I2C), CONFIGEND(),
+ // I2C1, Config 3 (SPI2 pads)
+ CONFIG(B,D,SPIG,I2C),CONFIG(B,D,SPIH,I2C), CONFIGEND(),
+ MODULEDONE()
+};
+
+/* I2C_2 instance 1 supports dynamic pin-muxing for CAM_I2C and GEN2_I2C;
+ * PinMap_Multiplex is intended to release all pads to a nominal
+ * state, so it is implemented at the end of the list using UNCONFIG
+ * options, so that no pad groups are trying to use I2C_2.
+ */
+const NvU32 g_Ap15MuxI2c2[] = {
+ // Reset & multiplexed config -- disown GEN2_I2C2 pads
+ UNCONFIG(G,PTA,I2C2,RSVD1),UNCONFIG(G,DTF,I2C2,RSVD1),UNCONFIG(E,LVP0,I2C2,RSVD),
+ UNCONFIG(E,LM1,I2C2,DISPLAYA),UNCONFIG(G,LHP0,I2C2,DISPLAYA),
+ UNCONFIG(G,LVP1,I2C2,DISPLAYA),CONFIGEND(),
+ // CAM_I2C pads
+ CONFIG(D,G,DTF,I2C2), CONFIGEND(),
+ // GEN2_I2C pads
+ CONFIG(A,G,PTA,I2C2), CONFIGEND(),
+ // LCD control pads
+ CONFIG(C,E,LVP0,I2C2), CONFIG(C,E,LM1,I2C2), CONFIGEND(),
+ // alternate LCD control pads
+ CONFIG(C,G,LHP0,I2C2), CONFIG(C,G,LVP1,I2C2), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxI2c[] = {
+ &g_Ap15MuxI2c1[0],
+ &g_Ap15MuxI2c2[0],
+ NULL
+};
+ const NvU32 g_Ap15MuxI2c_Pmu[] = {
+ // Reset config -- disown I2CP pads
+ UNCONFIG(C,I2CP,I2C, RSVD2), CONFIGEND(),
+ // I2CP pads
+ CONFIG(A,C,I2CP,I2C), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxI2cPmu[] = {
+ &g_Ap15MuxI2c_Pmu[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Mmc[] = {
+ CONFIGEND(), // no pad groups reset to MMC, so nothing to disown for reset config
+ CONFIG(A,A,ATB,HSMMC), CONFIG(A,A,ATD,HSMMC), CONFIG(B,A,ATE,HSMMC), CONFIGEND(),
+ CONFIG(A,A,ATB,HSMMC),CONFIG(A,A,ATD,HSMMC),CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxMmc[] = {
+ &g_Ap15Mux_Mmc[0],
+ NULL
+};
+ const NvU32 g_Ap15MuxSdio2[] = {
+    // Reset config - abandon SDB, SLXK, SLXA, SLXB, SLXC, SLXD; RSVD and SLINK4B chosen
+ UNCONFIG(D,SDB,SDIO2,RSVD), UNCONFIG(B,SLXK,SDIO1,SLINK4B), UNCONFIG(B,SLXB,SDIO1,SLINK4B),
+ UNCONFIG(B,SLXC,SDIO1,SLINK4B),UNCONFIG(B,SLXD,SDIO1,SLINK4B),UNCONFIG(B,SLXA,SDIO1,SLINK4B),
+ CONFIGEND(),
+ // config 1 SDB + SLXK,SLXA,SLXB,SLXC,SLXD pads
+ CONFIG(B,D,SDB,SDIO2), CONFIG(B,B,SLXK,SDIO1), CONFIG(B,B,SLXB,SDIO1),
+ CONFIG(B,B,SLXC,SDIO1), CONFIG(B,B,SLXD,SDIO1), CONFIG(B,B,SLXA,SDIO1),CONFIGEND(),
+ // config 2 KBCB,KBCE,KBCD pads
+ CONFIG(A,C,KBCB,SDIO1),CONFIG(A,A,KBCE,SDIO1),CONFIG(D,G,KBCD,SDIO1),
+ CONFIGEND(),
+ //config 3 KBCB pads
+ CONFIG(A,C,KBCB,SDIO1), CONFIGEND(),
+ // config 4 DAP1, SPDO, SPDI pads
+ CONFIG(A,C,DAP1,SDIO1), CONFIG(B,D,SPDO,SDIO1), CONFIG(B,D,SPDI,SDIO1), CONFIGEND(),
+ // config 5 DTA,DTD pads
+ CONFIG(A,B,DTA,SDIO1), CONFIG(A,B,DTD,SDIO1), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32 g_Ap15MuxSdio3[] = {
+    // no pad groups reset to SDIO3, so nothing to disown for reset config
+    CONFIGEND(),
+    // config 1 SDD + SDC + SDB + SLXK + SLXA + SLXB pads
+    CONFIG(B,D,SDD,SDIO2), CONFIG(B,D,SDC,SDIO2), CONFIG(B,D,SDB,SDIO2_ALT),
+    CONFIG(B,B,SLXA,SDIO2), CONFIG(B,B,SLXK,SDIO2), CONFIG(B,B,SLXB,SDIO2), CONFIGEND(),
+    // config 2 SDD, SDC pads
+    CONFIG(B,D,SDD,SDIO2), CONFIG(B,D,SDC,SDIO2), CONFIGEND(),
+    MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxSdio[] = {
+ &g_Ap15MuxSdio2[0],
+ &g_Ap15MuxSdio3[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Spdif[] = {
+    // Reset config - abandon SPDO, SPDI; RSVD chosen
+    UNCONFIG(D,SPDO,SPDIF,RSVD), UNCONFIG(D,SPDI,SPDIF,RSVD),CONFIGEND(),
+    // config 1 SPDO + SPDI pads
+    CONFIG(B,D,SPDO,SPDIF), CONFIG(B,D,SPDI,SPDIF), CONFIGEND(),
+    // config 2 SLXD, SLXC pads
+    CONFIG(B,B,SLXD,SPDIF), CONFIG(B,B,SLXC,SPDIF), CONFIGEND(),
+    // config 3 UAD pads
+    CONFIG(B,A,UAD,SPDIF), CONFIGEND(),
+    MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxSpdif[] = {
+ &g_Ap15Mux_Spdif[0],
+ NULL
+};
+static const NvU32 g_Ap15MuxUart1[] = {
+    // Reset config - abandon IRRX, IRTX & SDD
+    UNCONFIG(C,IRRX,UARTA,RSVD2), UNCONFIG(C,IRTX,UARTA,RSVD2), UNCONFIG(D,SDD,UARTA,PWM), CONFIGEND(),
+    // 8b UAA + UAB pads
+    CONFIG(B,A,UAA,UARTA), CONFIG(B,A,UAB,UARTA), CONFIGEND(),
+    // 4b UAA pads
+    CONFIG(B,A,UAA,UARTA_ALT3), CONFIGEND(),
+    // 8b GPU pads
+    CONFIG(A,D,GPU,UARTA), CONFIGEND(),
+    // 4b VFIR + UAD pads
+    CONFIG(A,C,IRRX,UARTA), CONFIG(A,C,IRTX,UARTA), CONFIG(B,A,UAD,UARTA), CONFIGEND(),
+    // 2b VFIR pads
+    CONFIG(A,C,IRRX,UARTA), CONFIG(A,C,IRTX,UARTA), CONFIGEND(),
+    // 2b SDIO pads
+    CONFIG(B,D,SDD,UARTA), CONFIGEND(),
+    MODULEDONE()
+};
+static const NvU32 g_Ap15MuxUart2[] = {
+// Reset config - abandon UAD. pads.chosen SFLASH pads
+ UNCONFIG(A,UAD,IRDA,SFLASH), CONFIGEND(),
+// 4b UAD + IRRX + IRTX pads
+ CONFIG(B,A,UAD,IRDA), CONFIG(A,C,IRRX,UARTB), CONFIG(A,C,IRTX,UARTB), CONFIGEND(),
+// 4b UAB pads
+ CONFIG(B,A,UAB,UARTB), CONFIGEND(),
+//..2b UAB pads
+ CONFIG(B,A,UAD,IRDA), CONFIGEND(),
+ MODULEDONE()
+};
+
+static const NvU32 g_Ap15MuxUart3[] = {
+ // Reset config - abandon UCA. chosen RSVD1
+ UNCONFIG(B,UCA,UARTC,RSVD1), CONFIGEND(),
+ // 4b UCA + UCB pads
+ CONFIG(B,B,UCA,UARTC), CONFIG(B,B,UCB,UARTC), CONFIGEND(),
+ // 2b UCA pads
+ CONFIG(B,B,UCA,UARTC), CONFIGEND(),
+ MODULEDONE()
+};
+
+static const NvU32* g_Ap15MuxUart[] = {
+ &g_Ap15MuxUart1[0],
+ &g_Ap15MuxUart2[0],
+ &g_Ap15MuxUart3[0],
+ NULL
+};
+ const NvU32 g_Ap15MuxSpi1[] = {
+ // Reset config - abandon SPIC, SPIB, SPIA, pads.
+ UNCONFIG(D,SPIC,SPI1,RSVD), UNCONFIG(D,SPIB,SPI1,RSVD),
+ UNCONFIG(D,SPIA,SPI1,RSVD), CONFIGEND(),
+ // SPIE,SPIF,SPID pads
+ CONFIG(B,D,SPIE,SPI1),CONFIG(B,D,SPIF,SPI1),CONFIG(B,D,SPID,SPI1), CONFIGEND(),
+ // DTE, DTB pads
+ CONFIG(A,B,DTE,SPI1), CONFIG(A,B,DTB,SPI1), CONFIGEND(),
+ // SPIC,SPIB,SPIA pads
+ CONFIG(B,D,SPIC,SPI1), CONFIG(B,D,SPIB,SPI1), CONFIG(B,D,SPIA,SPI1), CONFIGEND(),
+ // LHP2,LHP1,LHP0,LVP1,LDI,LPP pads
+ CONFIG(C,G,LHP2,SPI1), CONFIG(C,G,LHP1,SPI1), CONFIG(C,G,LHP0,SPI1),
+ CONFIG(C,G,LVP1,SPI1), CONFIG(D,G,LDI,SPI1), CONFIG(D,G,LPP,SPI1), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32 g_Ap15MuxSpi2[] = {
+ // Reset config - abandon UAB, pads. MIPI_HS chosen
+ UNCONFIG(A,UAB,SPI2,MIPI_HS), UNCONFIG(D,SPID,SPI2,RSVD),
+ UNCONFIG(D,SPIE,SPI2,RSVD), CONFIGEND(),
+ //..SPIC,SPIB,SPIA,SPIG, SPIH Pads
+ CONFIG(B,D,SPIC,SPI2), CONFIG(B,D,SPIB,SPI2), CONFIG(B,D,SPIA,SPI2),
+ CONFIG(B,D,SPIG,SPI2), CONFIG(B,D,SPIH,SPI2), CONFIGEND(),
+ // UAB pads
+ CONFIG(B,A,UAB,SPI2), CONFIGEND(),
+ // SPIE,SPIF,SPID,SPIG,SPIH pads
+ CONFIG(B,D,SPIE,SPI2_ALT),CONFIG(B,D,SPIF,SPI2),CONFIG(B,D,SPID,SPI2_ALT),
+ CONFIG(B,D,SPIG,SPI2_ALT),CONFIG(B,D,SPIH,SPI2_ALT), CONFIGEND(),
+ // SLXC,SLXK,SLXA,SLXB,SLXD pads
+ CONFIG(B,B,SLXC,SPI2), CONFIG(B,B,SLXK,SPI2), CONFIG(B,B,SLXA,SPI2),
+ CONFIG(B,B,SLXB,SPI2),CONFIG(B,B,SLXD,SPI2), CONFIGEND(),
+ MODULEDONE()
+};
+
+/* SPI instance 3 supports dynamic pin-muxing for audio-codec &
+ * display, PinMap_Multiplex is intended to release all pads to a nominal
+ * state, so it is implemented at the end of the list using UNCONFIG
+ * options, so that no pad groups are trying to use SPI3.
+ */
+ const NvU32 g_Ap15MuxSpi3[] = {
+/* Reset config - abandon UAA, SPIF, SPIG, SPIH pads. SPI2_ALT chosen
+ * as the reset state for SPIG/SPIH, since this will either be clobbered
+ * by Spi2 SpiPinMap_Config1, I2c1 I2cPinMap_Config3, correct (for Spi2
+ * SpiPinMap_Config3), or irrelevant */
+ UNCONFIG(A,UAA,SPI3,MIPI_HS), UNCONFIG(D,SPIF,SPI3,RSVD),
+ UNCONFIG(D,SPIG,SPI3,SPI2_ALT), UNCONFIG(D,SPIH,SPI3,SPI2_ALT),
+ // multiplex unconfiguration
+ UNCONFIG(C,XM2A,SPI3,SPROM), // multiplex config 1 to SPROM
+ UNCONFIG(E,LSC1,SPI3,DISPLAYA), UNCONFIG(E,LPW2,SPI3,DISPLAYA), // mux config 2 to displaya
+ UNCONFIG(E,LPW0,SPI3,DISPLAYA), UNCONFIG(E,LM0,SPI3,DISPLAYA),
+ UNCONFIG(E,LSCK,SPI3,DISPLAYA), UNCONFIG(E,LSDI,SPI3,DISPLAYA), // mux config 3 to displaya
+ UNCONFIG(D,SPIC,SPI3,RSVD),UNCONFIG(D,SPIB,SPI3,RSVD), // config 5 to rsvd
+ UNCONFIG(D,SPIA,SPI3,RSVD),
+ UNCONFIG(D,SDD,SPI3,PWM),UNCONFIG(D,SDC,SPI3,TWC), // config 6 to PWM & TWC
+ CONFIGEND(),
+ // XM2A pads
+ CONFIG(B,C,XM2A,SPI3), CONFIGEND(),
+ // LCD pads
+ CONFIG(C,E,LSC1,SPI3), CONFIG(D,E,LPW2,SPI3), CONFIG(D,E,LPW0,SPI3), CONFIG(C,E,LM0,SPI3), CONFIGEND(),
+ // Alternate LCD pads
+ CONFIG(C,E,LSCK,SPI3), CONFIG(D,E,LSDI,SPI3), CONFIG(D,E,LSDA,SPI3), CONFIG(C,E,LCSN,SPI3), CONFIGEND(),
+ // UAA pads
+ CONFIG(B,A,UAA,SPI3), CONFIGEND(),
+ // SPI pads
+ CONFIG(B,D,SPIA,SPI3), CONFIG(B,D,SPIB,SPI3), CONFIG(B,D,SPIC,SPI3), CONFIGEND(),
+ // 2CS SPI3 on SDIO pads
+ CONFIG(B,D,SDC,SPI3), CONFIG(B,D,SDD,SPI3), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxSpi[] = {
+ &g_Ap15MuxSpi1[0],
+ &g_Ap15MuxSpi2[0],
+ &g_Ap15MuxSpi3[0],
+ NULL
+};
+
+// Sflash should always be after PWM in the module order, since
+// the reset value for UCB muxes from both controllers, so the
+// reset configuration for Sflash assumes that Pwm has executed first.
+NV_CT_ASSERT((NvU32)NvOdmIoModule_Sflash > (NvU32)NvOdmIoModule_Pwm);
+
+const NvU32 g_Ap15Mux_Sflash[] = {
+ /* Reset config. Normally, this would disown the UCB pads; HOWEVER,
+ * the reset value for this pad group actually muxes from 2 controllers:
+ * PWM goes to UART3_RTS, and SFLASH goes to UART3_CTS. Since the PWM
+ * controller is initialized before Spi Flash, it is possible for the
+ * UCB pads to be correctly configured to mux 0 before reaching here.
+ * Therefore, the correct thing to do is to skip the UNCONFIG for this
+ * pad group, since PWM will already handle this.
+ */
+ /*UNCONFIG(B,UCB,PWM0,RSVD2),*/ CONFIGEND(),
+ // config 1 XM2S + XM2A pads
+ CONFIG(B,C,XM2S,SPI), CONFIG(B,C,XM2A,SPI), CONFIGEND(),
+ // config2 XM2S + UAD +XM2A pads
+ CONFIG(B,C,XM2S,SPI), CONFIG(B,A,UAD,SFLASH), CONFIG(B,C,XM2A,SPI), CONFIGEND(),
+ // config 3 XM2S + UCB +XM2A pads
+ CONFIG(B,C,XM2S,SPI), CONFIG(B,B,UCB,PWM0), CONFIG(B,C,XM2A,SPI), CONFIGEND(),
+ // config 4 XM2A UAD UCB XM2A pads
+ CONFIG(B,C,XM2S,SPI), CONFIG(B,A,UAD,SFLASH), CONFIG(B,B,UCB,PWM0),
+ CONFIG(B,C,XM2A,SPI), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxSflash[] = {
+ &g_Ap15Mux_Sflash[0],
+ NULL
+};
+
+
+ const NvU32 g_Ap15Mux_Twc[] = {
+ // no pad groups reset to TWC, so nothing to disown for reset config
+ CONFIGEND(),
+ // DAP2 pads
+ CONFIG(A,C,DAP2,TWC), CONFIGEND(),
+ // SDC pads
+ CONFIG(B,D,SDC,TWC), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxTwc[] = {
+ &g_Ap15Mux_Twc[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Ata[] = {
+    // Reset config -- abandon ATA, ATC, ATB, ATD, ATE pads; NAND/RSVD as chosen pads
+    UNCONFIG(A,ATC,IDE,RSVD), UNCONFIG(A,ATD,IDE,NAND), UNCONFIG(A,ATE,IDE,NAND),
+    UNCONFIG(A,ATA,IDE,RSVD), UNCONFIG(A,ATB,IDE,NAND), CONFIGEND(),
+    // ATA, Config 1 (Nand pads)
+    CONFIG(A,A,ATC,IDE), CONFIG(A,A,ATD,IDE), CONFIG(B,A,ATE,IDE), CONFIG(A,A,ATA,IDE),
+    CONFIG(A,A,ATB,IDE), CONFIGEND(),
+    MODULEDONE()
+};
+ const NvU32* g_Ap15MuxAta[] = {
+ &g_Ap15Mux_Ata[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Pwm[] = {
+    // Reset config -- disown SDC, UCB pads; SDIO2, RSVD2 as chosen pads
+    UNCONFIG(D,SDC,PWM,SDIO2), UNCONFIG(B,UCB,PWM0,RSVD2), CONFIGEND(),
+    // PWM, Config 1 (SDC pads)
+    CONFIG(B,D,SDC,PWM), CONFIGEND(),
+    // PWM, Config 2 (UCB, SDD pads)
+    CONFIG(B,B,UCB,PWM0), CONFIG(B,D,SDD,PWM), CONFIGEND(),
+    // PWM, Config 3 (UCB pads)
+    CONFIG(B,B,UCB,PWM0), CONFIGEND(),
+    CONFIG(B,D,SDD,PWM), CONFIGEND(), // PWM, Config 4 (SDD pads)
+    MODULEDONE()
+};
+ const NvU32* g_Ap15MuxPwm[] = {
+ &g_Ap15Mux_Pwm[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Hsi[] = {
+ CONFIGEND(), // no pad groups reset to HSI, so nothing to disown for reset config
+ CONFIG(B,A,UAA,MIPI_HS), CONFIG(B,A,UAB,MIPI_HS), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32 *g_Ap15MuxHsi[] = {
+ &g_Ap15Mux_Hsi[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Nand[] = {
+    CONFIGEND(), // no pad groups reset to NAND, so nothing to disown for reset config
+    // config 1 ATA,ATB,ATC,ATD,ATE pads
+    CONFIG(A,A,ATA,NAND_ALT), CONFIG(A,A,ATB,NAND_ALT), CONFIG(A,A,ATC,NAND),
+    CONFIG(A,A,ATD,NAND), CONFIG(B,A,ATE,NAND), CONFIGEND(),
+    // config 2 ATA,ATB,ATC,ATD,ATE pads
+    CONFIG(A,A,ATA,NAND), CONFIG(A,A,ATB,NAND), CONFIG(A,A,ATC,NAND),
+    CONFIG(A,A,ATD,NAND), CONFIG(B,A,ATE,NAND), CONFIGEND(),
+    // config 3 ATA,ATC,ATE pads
+    CONFIG(A,A,ATA,NAND), CONFIG(A,A,ATC,NAND),
+    CONFIG(B,A,ATE,NAND_ALT), CONFIGEND(),
+    // config 4 ATA,ATB,ATC,ATD,ATE pads
+    CONFIG(A,A,ATA,NAND), CONFIG(A,A,ATB,NAND), CONFIG(A,A,ATC,NAND),
+    CONFIG(A,A,ATD,NAND_ALT), CONFIG(B,A,ATE,NAND_ALT), CONFIGEND(),
+    // config 5 ATA,ATC pads
+    CONFIG(A,A,ATA,NAND), CONFIG(A,A,ATC,NAND), CONFIGEND(),
+    // config 6 ATA,ATB,ATC pads
+    CONFIG(A,A,ATA,NAND), CONFIG(A,A,ATB,NAND),
+    CONFIG(A,A,ATC,NAND), CONFIGEND(),
+    MODULEDONE()
+};
+ const NvU32* g_Ap15MuxNand[] = {
+ &g_Ap15Mux_Nand[0],
+ NULL
+};
+
+ const NvU32 g_Ap15MuxDap1[] = {
+ // Reset config - abandon ,DAP1.. RSVD2 chosen
+ UNCONFIG(C,DAP1,DAP1,RSVD2), CONFIGEND(),
+ // config1 DAP1 pads
+ CONFIG(A,C,DAP1,DAP1), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32 g_Ap15MuxDap2[] = {
+    // Reset config - abandon DAP2; RSVD3 chosen
+    UNCONFIG(C,DAP2,DAP2,RSVD3), CONFIGEND(),
+    // config 1 DAP2 pads
+    CONFIG(A,C,DAP2,DAP2), CONFIGEND(),
+    // (no further configurations for DAP2)
+    MODULEDONE()
+};
+ const NvU32 g_Ap15MuxDap3[] = {
+ // Reset config - abandon ,DAP3... RSVD2 chosen
+ UNCONFIG(C,DAP3,DAP3,RSVD2), CONFIGEND(),
+ // config1 DAP3 pads
+ CONFIG(A,C,DAP3,DAP3), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32 g_Ap15MuxDap4[] = {
+ // Reset config - abandon ,DAP4...RSVD2 chosen
+ UNCONFIG(C,DAP4,DAP4,RSVD2), CONFIGEND(),
+ // config1 DAP4 pads
+ CONFIG(A,C,DAP4,DAP4), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxDap[] = {
+ &g_Ap15MuxDap1[0],
+ &g_Ap15MuxDap2[0],
+ &g_Ap15MuxDap3[0],
+ &g_Ap15MuxDap4[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Kbc[] = {
+ // Reset config - abandon ,RSVD2, RSVD1 chosen
+ UNCONFIG(C,KBCA,KBC,RSVD2), UNCONFIG(C,KBCB,KBC,RSVD2), UNCONFIG(A,KBCE,KBC,RSVD1),
+ UNCONFIG(C,KBCC,KBC,RSVD2), UNCONFIG(G,KBCD,KBC,RSVD2), UNCONFIG(A,KBCF,KBC,RSVD1), CONFIGEND(),
+ // KBCA,KBCB,KBCC,KBCD,KBCE,KBCF pads
+ CONFIG(A,C,KBCA,KBC), CONFIG(A,C,KBCB,KBC), CONFIG(A,A,KBCE,KBC),
+ CONFIG(B,C,KBCC,KBC), CONFIG(D,G,KBCD,KBC), CONFIG(A,A,KBCF,KBC), CONFIGEND(),
+ // KBCA,KBCC,KBCD,KBCE,KBCF pads
+ CONFIG(A,C,KBCA,KBC), CONFIG(A,A,KBCE,KBC),
+ CONFIG(B,C,KBCC,KBC), CONFIG(D,G,KBCD,KBC), CONFIG(A,A,KBCF,KBC), CONFIGEND(),
+ // KBCA,KBCC,KBCF, pads
+ CONFIG(A,C,KBCA,KBC), CONFIG(B,C,KBCC,KBC), CONFIG(A,A,KBCF,KBC), CONFIGEND(),
+ // KBCA,KBCC pads
+ CONFIG(A,C,KBCA,KBC), CONFIG(B,C,KBCC,KBC), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxKbc[] = {
+ &g_Ap15Mux_Kbc[0],
+ NULL
+};
+ const NvU32 g_Ap15Mux_Hdcp[] = {
+    CONFIGEND(), // no pad groups reset to HDCP, so nothing to disown for reset config
+    CONFIG(A,G,PTA,HDMI), CONFIGEND(),                          // config 1: PTA pad
+    CONFIG(C,E,LSCK,HDMI), CONFIG(D,E,LSDA,HDMI), CONFIGEND(),  // config 2: LSCK + LSDA pads
+    CONFIG(D,E,LPW2,HDMI), CONFIG(D,E,LPW0,HDMI), CONFIGEND(),  // config 3: LPW2 + LPW0 pads
+    CONFIG(C,E,LSC1,HDMI), CONFIG(D,E,LPW0,HDMI), CONFIGEND(),  // config 4: LSC1 + LPW0 pads
+    MODULEDONE()
+};
+ const NvU32* g_Ap15MuxHdcp[] = {
+ &g_Ap15Mux_Hdcp[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Hdmi[] = {
+ // HDINT resets to HDINT, so move it to a reserved pin
+ UNCONFIG(B,HDINT,RSVD1,RSVD2), CONFIGEND(),
+ CONFIG(C,B,HDINT,RSVD1), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxHdmi[] = {
+ &g_Ap15Mux_Hdmi[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Mio[] = {
+ CONFIGEND(), // no pad groups reset to MIO, so nothing to disown for reset config
+ CONFIG(A,A,KBCF,MIO), CONFIG(D,G,KBCD,MIO), CONFIG(A,C,KBCB,MIO), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxMio[] = {
+ &g_Ap15Mux_Mio[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Slink[] = {
+ CONFIGEND(), // no pad groups reset to SLINK, so nothing to disown for reset config
+ CONFIG(B,B,SLXK,SLINK4B), CONFIG(B,B,SLXA,SLINK4B), CONFIG(B,B,SLXB,SLINK4B),
+ CONFIG(B,B,SLXC,SLINK4B), CONFIG(B,B,SLXD,SLINK4B), CONFIGEND(),
+ MODULEDONE()
+};
+ const NvU32* g_Ap15MuxSlink[] = {
+ &g_Ap15Mux_Slink[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Vi[] = {
+ CONFIGEND(), // no pad groups reset to VI so nothing to disown for reset config
+ // config 1 DTA - DTF pads
+ BRANCH(NvOdmVideoInputPinMap_Config2), CONFIG(D,G,DTF,VI), CONFIGEND(),
+ // config 2 DTA - DTE and CSUS pads
+ CONFIG(A,B,DTA,VI), CONFIG(A,B,DTB,VI), CONFIG(A,B,DTC,VI),
+ CONFIG(A,B,DTD,VI), CONFIG(A,B,DTE,VI), CONFIGEND(),
+ MODULEDONE(),
+ SUBROUTINESDONE(),
+};
+
+ const NvU32* g_Ap15MuxVi[] = {
+ &g_Ap15Mux_Vi[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Crt[] = {
+    // TODO: confirm and fix -- none of the docs specifies the TV pad group
+ CONFIGEND(), // no pad groups reset to CRT so nothing to disown for reset config
+ // config 1 LHS, LVS, pads
+ CONFIG(D,E,LHS,CRT), CONFIG(C,E,LVS,CRT), CONFIGEND(),
+ // config 2 LHP2,LPW1 pads
+ CONFIG(C,G,LHP2,CRT), CONFIG(D,E,LPW1,CRT), CONFIGEND(),
+ // config 3 LM1,LPW1 pads
+ CONFIG(C,E,LM1,CRT), CONFIG(D,E,LPW1,CRT), CONFIGEND(),
+ // config 4 LHP2,LCSN pads
+ CONFIG(C,G,LHP2,CRT), CONFIG(C,E,LCSN,CRT), CONFIGEND(),
+ MODULEDONE()
+};
+
+const NvU32* g_Ap15MuxCrt[] = {
+ &g_Ap15Mux_Crt[0],
+ NULL
+};
+
+const NvU32 g_Ap15Mux_BacklightDisplay1Pwm0[] = {
+ CONFIGEND(),
+ // Config 1 LPW0 pad
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIGEND(),
+ // Config 2 LPW2 pad
+ CONFIG(D,E,LPW2,DISPLAYA), CONFIGEND(),
+ // Config 3 LM0 pad
+ CONFIG(C,E,LM0,DISPLAYA), CONFIGEND(),
+ MODULEDONE()
+};
+
+const NvU32 g_Ap15Mux_BacklightDisplay1Pwm1[] = {
+ CONFIGEND(),
+ // Config 1 LM1 pad
+ CONFIG(C,E,LM1,DISPLAYA), CONFIGEND(),
+ // Config 2 LDC pad
+ CONFIG(C,E,LDC,DISPLAYA), CONFIGEND(),
+ // Config 3 LPW1 pad
+ CONFIG(D,E,LPW1,DISPLAYA), CONFIGEND(),
+ MODULEDONE()
+};
+
+const NvU32 g_Ap15Mux_BacklightDisplay2Pwm0[] = {
+ CONFIGEND(),
+ // Config 1 LPW0 pad
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIGEND(),
+ // Config 2 LPW2 pad
+ CONFIG(D,E,LPW2,DISPLAYB), CONFIGEND(),
+ // Config 3 LM0 pad
+ CONFIG(C,E,LM0,DISPLAYB), CONFIGEND(),
+ MODULEDONE()
+};
+
+const NvU32 g_Ap15Mux_BacklightDisplay2Pwm1[] = {
+ CONFIGEND(),
+ // Config 1 LM1 pad
+ CONFIG(C,E,LM1,DISPLAYB), CONFIGEND(),
+ // Config 2 LDC pad
+ CONFIG(C,E,LDC,DISPLAYB), CONFIGEND(),
+ // Config 3 LPW1 pad
+ CONFIG(D,E,LPW1,DISPLAYB), CONFIGEND(),
+ MODULEDONE()
+};
+
+const NvU32* g_Ap15MuxBacklight[] = {
+ &g_Ap15Mux_BacklightDisplay1Pwm0[0],
+ &g_Ap15Mux_BacklightDisplay1Pwm1[0],
+ &g_Ap15Mux_BacklightDisplay2Pwm0[0],
+ &g_Ap15Mux_BacklightDisplay2Pwm1[0],
+ NULL
+};
+
+const NvU32 g_Ap15Mux_Display1[] = {
+ CONFIGEND(),
+ // config 1, 24b RGB. Pure superset of Config2 (18b RGB)
+ BRANCH(2),
+ CONFIG(C,G,LHP1,DISPLAYA),CONFIG(C,G,LHP2,DISPLAYA),CONFIG(C,G,LVP1,DISPLAYA),
+ CONFIG(C,G,LHP0,DISPLAYA),CONFIG(D,G,LDI,DISPLAYA),CONFIG(D,G,LPP,DISPLAYA),
+ CONFIGEND(),
+ // config 2, 18b RGB.
+ BRANCH(7),
+ CONFIG(C,E,LVS,DISPLAYA), CONFIG(D,E,LHS,DISPLAYA), CONFIG(D,E,LSPI,DISPLAYA),
+ CONFIGEND(),
+ // config 3, 8 & 9b CPU.
+ CONFIG(C,G,LHP1,DISPLAYA), CONFIG(C,G,LHP2,DISPLAYA), CONFIG(C,G,LVP1,DISPLAYA),
+ CONFIG(C,G,LHP0,DISPLAYA), CONFIG(D,G,LDI,DISPLAYA), CONFIG(D,G,LPP,DISPLAYA),
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIG(D,E,LPW1,DISPLAYA), CONFIG(D,E,LPW2,DISPLAYA),
+ CONFIG(C,E,LSC1,DISPLAYA), CONFIG(C,E,LM1,DISPLAYA),
+ CONFIG(C,E,LVP0,DISPLAYA), CONFIGEND(),
+ // config 4. SPI
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIG(D,E,LPW2,DISPLAYA), CONFIG(C,E,LSC1,DISPLAYA),
+ CONFIG(C,E,LM0,DISPLAYA), CONFIG(C,E,LVP0,DISPLAYA), CONFIGEND(),
+ // Config 5. Panel 86
+ BRANCH(7),CONFIG(C,E,LSC1,DISPLAYA),CONFIG(C,E,LM1,DISPLAYA),CONFIGEND(),
+ // config 6. 16/18b smart panels
+ BRANCH(7),CONFIG(C,E,LDC,DISPLAYA),CONFIG(D,E,LSPI,DISPLAYA),CONFIGEND(),
+ MODULEDONE(),
+ // subroutine 1. - 18b data + clock
+ CONFIG(C,F,LD0,DISPLAYA), CONFIG(C,F,LD1,DISPLAYA), CONFIG(C,F,LD2,DISPLAYA),
+ CONFIG(C,F,LD3,DISPLAYA), CONFIG(C,F,LD4,DISPLAYA), CONFIG(C,F,LD5,DISPLAYA),
+ CONFIG(C,F,LD6,DISPLAYA), CONFIG(C,F,LD7,DISPLAYA), CONFIG(C,F,LD8,DISPLAYA),
+ CONFIG(C,F,LD9,DISPLAYA), CONFIG(C,F,LD10,DISPLAYA), CONFIG(C,F,LD11,DISPLAYA),
+ CONFIG(C,F,LD12,DISPLAYA), CONFIG(C,F,LD13,DISPLAYA), CONFIG(C,F,LD14,DISPLAYA),
+ CONFIG(C,F,LD15,DISPLAYA), CONFIG(C,G,LD16,DISPLAYA), CONFIG(C,G,LD17,DISPLAYA),
+ CONFIG(C,E,LSC0,DISPLAYA), CONFIGEND(),
+ SUBROUTINESDONE(), // This is required, since BRANCH is used.
+/* For handy reference, here is the complete list of CONFIG macros for the display
+ pad groups, in case any more configurations are defined in the future.
+ CONFIG(C,F,LD0,DISPLAYA), CONFIG(C,F,LD1,DISPLAYA), CONFIG(C,F,LD2,DISPLAYA),
+ CONFIG(C,F,LD3,DISPLAYA), CONFIG(C,F,LD4,DISPLAYA), CONFIG(C,F,LD5,DISPLAYA),
+ CONFIG(C,F,LD6,DISPLAYA), CONFIG(C,F,LD7,DISPLAYA), CONFIG(C,F,LD8,DISPLAYA),
+ CONFIG(C,F,LD9,DISPLAYA), CONFIG(C,F,LD10,DISPLAYA), CONFIG(C,F,LD11,DISPLAYA),
+ CONFIG(C,F,LD12,DISPLAYA),
+ CONFIG(C,F,LD13,DISPLAYA), CONFIG(C,F,LD14,DISPLAYA), CONFIG(C,F,LD15,DISPLAYA),
+ CONFIG(C,G,LD16,DISPLAYA), CONFIG(C,G,LD17,DISPLAYA),CONFIG(C,E,LSC0,DISPLAYA),
+ CONFIG(C,E,LVS,DISPLAYA), CONFIG(D,E,LHS,DISPLAYA), CONFIG(D,E,LSPI,DISPLAYA),
+ CONFIG(C,G,LHP1,DISPLAYA), CONFIG(C,G,LHP2,DISPLAYA), CONFIG(C,G,LHP0,DISPLAYA),
+ CONFIG(C,G,LVP1,DISPLAYA), CONFIG(D,G,LDI,DISPLAYA), CONFIG(D,G,LPP,DISPLAYA),
+ CONFIG(C,E,LCSN,DISPLAYA), CONFIG(C,E,LM1,DISPLAYA),CONFIG(C,E,LM0,DISPLAYA),
+ CONFIG(D,E,LPW0,DISPLAYA),CONFIG(D,E,LPW2,DISPLAYA), CONFIG(D,E,LPW1,DISPLAYA),
+ CONFIG(C,E,LVP0,DISPLAYA), CONFIG(C,E,LDC,DISPLAYA), CONFIG(C,E,LSC1,DISPLAYA),
+ CONFIG(D,E,LSDI,DISPLAYA),
+ */
+};
+
+const NvU32 g_Ap15Mux_Display2[] = {
+ CONFIGEND(),
+ // config 1, 24b RGB. Pure superset of Config2 (18b RGB)
+ BRANCH(2),
+ CONFIG(C,G,LHP1,DISPLAYB),CONFIG(C,G,LHP2,DISPLAYB),CONFIG(C,G,LVP1,DISPLAYB),
+ CONFIG(C,G,LHP0,DISPLAYB),CONFIG(D,G,LDI,DISPLAYB),CONFIG(D,G,LPP,DISPLAYB),
+ CONFIGEND(),
+ // config 2, 18b RGB.
+ BRANCH(7),
+ CONFIG(C,E,LVS,DISPLAYB), CONFIG(D,E,LHS,DISPLAYB), CONFIG(D,E,LSPI,DISPLAYB),
+ CONFIGEND(),
+ // config 3, 8 & 9b CPU.
+ CONFIG(C,G,LHP1,DISPLAYB), CONFIG(C,G,LHP2,DISPLAYB), CONFIG(C,G,LVP1,DISPLAYB),
+ CONFIG(C,G,LHP0,DISPLAYB), CONFIG(D,G,LDI,DISPLAYB), CONFIG(D,G,LPP,DISPLAYB),
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIG(D,E,LPW1,DISPLAYB), CONFIG(D,E,LPW2,DISPLAYB),
+ CONFIG(C,E,LSC1,DISPLAYB), CONFIG(C,E,LM1,DISPLAYB),
+ CONFIG(C,E,LVP0,DISPLAYB), CONFIGEND(),
+ // config 4. SPI
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIG(D,E,LPW2,DISPLAYB), CONFIG(C,E,LSC1,DISPLAYB),
+ CONFIG(C,E,LM0,DISPLAYB), CONFIG(C,E,LVP0,DISPLAYB), CONFIGEND(),
+    // Config 5. Used only for Sony VGA panel
+ BRANCH(7),CONFIG(C,E,LSC1,DISPLAYB),CONFIG(C,E,LM1,DISPLAYB),CONFIGEND(),
+ // config 6. 16/18b smart panels
+ BRANCH(7),CONFIG(C,E,LDC,DISPLAYB),CONFIG(D,E,LSPI,DISPLAYB),CONFIGEND(),
+ MODULEDONE(),
+ // subroutine 1. (config 7)
+ CONFIG(C,F,LD0,DISPLAYB), CONFIG(C,F,LD1,DISPLAYB), CONFIG(C,F,LD2,DISPLAYB),
+ CONFIG(C,F,LD3,DISPLAYB), CONFIG(C,F,LD4,DISPLAYB), CONFIG(C,F,LD5,DISPLAYB),
+ CONFIG(C,F,LD6,DISPLAYB), CONFIG(C,F,LD7,DISPLAYB), CONFIG(C,F,LD8,DISPLAYB),
+ CONFIG(C,F,LD9,DISPLAYB), CONFIG(C,F,LD10,DISPLAYB), CONFIG(C,F,LD11,DISPLAYB),
+ CONFIG(C,F,LD12,DISPLAYB), CONFIG(C,F,LD13,DISPLAYB), CONFIG(C,F,LD14,DISPLAYB),
+ CONFIG(C,F,LD15,DISPLAYB), CONFIG(C,G,LD16,DISPLAYB), CONFIG(C,G,LD17,DISPLAYB),
+ CONFIG(C,E,LSC0,DISPLAYB), CONFIGEND(),
+ SUBROUTINESDONE(),
+};
+
+ const NvU32* g_Ap15MuxDisplay[] = {
+ &g_Ap15Mux_Display1[0],
+ &g_Ap15Mux_Display2[0],
+ NULL
+};
+
+ const NvU32 g_Ap15Mux_Cdev1[] = {
+ // reset config - no-op
+ CONFIGEND(),
+ CONFIG(A,C,CDEV1,PLLA_OUT), CONFIGEND(),
+ CONFIG(A,C,CDEV1,OSC), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32 g_Ap15Mux_Cdev2[] = {
+ CONFIGEND(),
+ CONFIG(A,C,CDEV2,AHB_CLK), CONFIGEND(),
+ CONFIG(A,C,CDEV2,OSC), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32 g_Ap15Mux_Csus[] = {
+ CONFIGEND(),
+ CONFIG(A,C,CSUS,VI_SENSOR_CLK), CONFIGEND(),
+ MODULEDONE()
+};
+
+ const NvU32* g_Ap15MuxCdev[] =
+{
+ &g_Ap15Mux_Cdev1[0],
+ &g_Ap15Mux_Cdev2[0],
+ &g_Ap15Mux_Csus[0],
+ NULL
+};
+
+/* Array of all the controller types in the system, pointing to the array of
+ * instances of each controller. Indexed using the NvRmIoModule value.
+ */
+static const NvU32** g_Ap15MuxControllers[] = {
+ &g_Ap15MuxAta[0],
+ &g_Ap15MuxCrt[0],
+ NULL, // no options for CSI
+ &g_Ap15MuxDap[0],
+ &g_Ap15MuxDisplay[0],
+ NULL, // no options for DSI
+ NULL, // no options for GPIO
+ &g_Ap15MuxHdcp[0],
+ &g_Ap15MuxHdmi[0],
+ &g_Ap15MuxHsi[0],
+ &g_Ap15MuxMmc[0],
+ NULL, // no options for I2S
+ &g_Ap15MuxI2c[0],
+ &g_Ap15MuxI2cPmu[0],
+ &g_Ap15MuxKbc[0],
+ &g_Ap15MuxMio[0],
+ &g_Ap15MuxNand[0],
+ &g_Ap15MuxPwm[0],
+ &g_Ap15MuxSdio[0],
+ &g_Ap15MuxSflash[0],
+ &g_Ap15MuxSlink[0],
+ &g_Ap15MuxSpdif[0],
+ &g_Ap15MuxSpi[0],
+ &g_Ap15MuxTwc[0],
+ NULL, // no options for TVO
+ &g_Ap15MuxUart[0],
+ NULL, // no options for USB
+ NULL, // no options for VDD
+ &g_Ap15MuxVi[0],
+ NULL, // no options for XIO
+ &g_Ap15MuxCdev[0],
+ NULL, // no options for Ulpi
+ NULL, // no options for one wire
+ NULL, // no options for sync NOR
+ NULL, // no options for PCI-E
+ NULL, // no options for ETM
+ NULL, // no options for TSENSor
+ &g_Ap15MuxBacklight[0],
+};
+
+NV_CT_ASSERT(NV_ARRAY_SIZE(g_Ap15MuxControllers)==NvOdmIoModule_Num);
+
+const NvU32***
+NvRmAp15GetPinMuxConfigs(NvRmDeviceHandle hDevice)
+{
+ NV_ASSERT(hDevice);
+ return (const NvU32***) g_Ap15MuxControllers;
+}
+
+
+/* Define the GPIO port/pin to tristate mappings */
+
+const NvU16 g_Ap15GpioPadGroupMapping[] =
+{
+ // Port A
+ GPIO_TRISTATE(B,SDB), GPIO_TRISTATE(B,UCB), GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(A,DAP2),
+ GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(B,SDD), GPIO_TRISTATE(B,SDD),
+ // Port B
+ GPIO_TRISTATE(B,XM2A), GPIO_TRISTATE(B,XM2A), GPIO_TRISTATE(D,LPW0), GPIO_TRISTATE(C,LSC0),
+ GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC),
+ // Port C
+ GPIO_TRISTATE(B,UCB), GPIO_TRISTATE(D,LPW1), GPIO_TRISTATE(B,UAD), GPIO_TRISTATE(B,UAD),
+ GPIO_TRISTATE(A,RM), GPIO_TRISTATE(A,RM), GPIO_TRISTATE(D,LPW2), GPIO_TRISTATE(B,XM2C),
+ // Port D
+ GPIO_TRISTATE(B,SLXK), GPIO_TRISTATE(B,SLXA), GPIO_TRISTATE(B,SLXB), GPIO_TRISTATE(B,SLXC),
+ GPIO_TRISTATE(B,SLXD), GPIO_TRISTATE(A,DTA), GPIO_TRISTATE(A,DTC), GPIO_TRISTATE(A,DTC),
+ // Port E
+ GPIO_TRISTATE(C,LD0), GPIO_TRISTATE(C,LD1), GPIO_TRISTATE(C,LD2), GPIO_TRISTATE(C,LD3),
+ GPIO_TRISTATE(C,LD4), GPIO_TRISTATE(C,LD5), GPIO_TRISTATE(C,LD6), GPIO_TRISTATE(C,LD7),
+ // Port F
+ GPIO_TRISTATE(C, LD8), GPIO_TRISTATE(C,LD9), GPIO_TRISTATE(C,LD10), GPIO_TRISTATE(C,LD11),
+ GPIO_TRISTATE(C, LD12), GPIO_TRISTATE(C,LD13), GPIO_TRISTATE(C, LD14), GPIO_TRISTATE(C,LD15),
+ // Port G
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ // Port H
+ GPIO_TRISTATE(A,ATD), GPIO_TRISTATE(A,ATD),GPIO_TRISTATE(A,ATD), GPIO_TRISTATE(A,ATD),
+ GPIO_TRISTATE(B,ATE), GPIO_TRISTATE(B,ATE),GPIO_TRISTATE(B,ATE), GPIO_TRISTATE(B,ATE),
+ // Port I
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATA), GPIO_TRISTATE(A,ATA),
+ GPIO_TRISTATE(A,ATA), GPIO_TRISTATE(A,ATB), GPIO_TRISTATE(A,ATB), GPIO_TRISTATE(A,ATC),
+ // Port J
+ GPIO_TRISTATE(B,XM2S), GPIO_TRISTATE(D,LSPI), GPIO_TRISTATE(B,XM2S), GPIO_TRISTATE(D,LHS),
+ GPIO_TRISTATE(C,LVS), GPIO_TRISTATE(A,IRTX), GPIO_TRISTATE(A,IRRX), GPIO_TRISTATE(B,XM2A),
+ // Port K
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(B,SPDO), GPIO_TRISTATE(B,SPDI), GPIO_TRISTATE(B,XM2A),
+ // Port L
+ GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD),
+ GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD),
+ // Port M
+ GPIO_TRISTATE(C,LD16), GPIO_TRISTATE(C,LD17), GPIO_TRISTATE(C,LHP1), GPIO_TRISTATE(C,LHP2),
+ GPIO_TRISTATE(C,LVP1), GPIO_TRISTATE(C,LHP0), GPIO_TRISTATE(D,LDI), GPIO_TRISTATE(D,LPP),
+ // Port N
+ GPIO_TRISTATE(A,DAP1), GPIO_TRISTATE(A,DAP1), GPIO_TRISTATE(A,DAP1), GPIO_TRISTATE(A,DAP1),
+ GPIO_TRISTATE(C,LCSN), GPIO_TRISTATE(D,LSDA), GPIO_TRISTATE(C,LDC), GPIO_TRISTATE(C,HDINT),
+ // Port O
+ GPIO_TRISTATE(B,UAB), GPIO_TRISTATE(B,UAA), GPIO_TRISTATE(B,UAA), GPIO_TRISTATE(B,UAA),
+ GPIO_TRISTATE(B,UAA), GPIO_TRISTATE(B,UAB), GPIO_TRISTATE(B,UAB), GPIO_TRISTATE(B,UAB),
+ // Port P
+ GPIO_TRISTATE(A,DAP3), GPIO_TRISTATE(A,DAP3), GPIO_TRISTATE(A,DAP3), GPIO_TRISTATE(A,DAP3),
+ GPIO_TRISTATE(A,DAP4), GPIO_TRISTATE(A,DAP4), GPIO_TRISTATE(A,DAP4), GPIO_TRISTATE(A,DAP4),
+ // Port Q
+ GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCF),
+ GPIO_TRISTATE(A,PMC), GPIO_TRISTATE(A,PMC), GPIO_TRISTATE(A,I2CP), GPIO_TRISTATE(A,I2CP),
+ // Port R
+ GPIO_TRISTATE(A,KBCA), GPIO_TRISTATE(A,KBCA), GPIO_TRISTATE(A,KBCA), GPIO_TRISTATE(A,KBCE),
+ GPIO_TRISTATE(D,KBCD), GPIO_TRISTATE(D,KBCD), GPIO_TRISTATE(D,KBCD), GPIO_TRISTATE(A,KBCB),
+ // Port S
+ GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB),
+ GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(B,KBCC), GPIO_TRISTATE(B,KBCC), GPIO_TRISTATE(B,KBCC),
+ // Port T
+ GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,CSUS), GPIO_TRISTATE(A,DTB), GPIO_TRISTATE(A,DTB),
+ GPIO_TRISTATE(A,PTA), GPIO_TRISTATE(A,PTA), GPIO_TRISTATE(A,PTA), GPIO_TRISTATE(A,PTA),
+ // Port U
+ GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU),
+ GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(D,GPU7),
+ // Port V
+ GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC),
+ GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(C,LVP0),
+ // Port W
+ GPIO_TRISTATE(C,LM0), GPIO_TRISTATE(C,LM1), GPIO_TRISTATE(B,SPIG), GPIO_TRISTATE(B,SPIH),
+ GPIO_TRISTATE(A,CDEV1), GPIO_TRISTATE(A,CDEV2), GPIO_TRISTATE(B,UCA), GPIO_TRISTATE(B,UCA),
+ // Port X
+ GPIO_TRISTATE(B,SPIA), GPIO_TRISTATE(B,SPIB), GPIO_TRISTATE(B,SPIC), GPIO_TRISTATE(B,SPIC),
+ GPIO_TRISTATE(B,SPID), GPIO_TRISTATE(B,SPIE), GPIO_TRISTATE(B,SPIE), GPIO_TRISTATE(B,SPIF)
+};
+
+ // Translates a GPIO (port, pin) pair into its packed pad-group/tristate
+ // mapping entry.
+ //
+ // hDevice  - RM device handle (not dereferenced here; kept for API symmetry)
+ // Port     - GPIO port index (0 = port A, 1 = port B, ...)
+ // Pin      - pin index within the port (0..7)
+ // pMapping - receives the GPIO_TRISTATE() entry for this pin
+ //
+ // Returns NV_FALSE when Pin or the flattened index falls outside
+ // g_Ap15GpioPadGroupMapping; NV_TRUE on success.
+ NvBool
+ NvRmAp15GetPinGroupForGpio(NvRmDeviceHandle hDevice,
+ NvU32 Port,
+ NvU32 Pin,
+ NvU32 *pMapping)
+ {
+ const NvU32 GpiosPerPort = 8;
+ // Flatten (port, pin) into a single table index: 8 GPIOs per port
+ NvU32 Index = Port*GpiosPerPort + Pin;
+
+ if ((Pin >= GpiosPerPort) || (Index >= NV_ARRAY_SIZE(g_Ap15GpioPadGroupMapping)))
+ return NV_FALSE;
+
+ *pMapping = (NvU32)g_Ap15GpioPadGroupMapping[Index];
+ return NV_TRUE;
+ }
+
+ // Top level AP15 clock enable register control macro.
+ //
+ // Performs a read-modify-write of <field> in CLK_RST_CONTROLLER_<offset>_0
+ // under the CAR mutex, writing EnableState into the field.
+ //
+ // NOTE: this macro expands in place and assigns to local variables 'reg'
+ // and 'regaddr', which MUST be declared in the calling scope.
+ #define CLOCK_ENABLE( rm, offset, field, EnableState ) \
+ do { \
+ regaddr = (CLK_RST_CONTROLLER_##offset##_0); \
+ NvOsMutexLock((rm)->CarMutex); \
+ reg = NV_REGR((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr); \
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, offset, field, EnableState, reg); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr, reg); \
+ NvOsMutexUnlock((rm)->CarMutex); \
+ } while( 0 )
+
+ // Enables/disables the external clock output (CDEV1/CDEV2/CSUS) selected
+ // by the given packed pin-mux descriptor.
+ //
+ // Instance   - pointer to the packed MUX ENTRY word for the pad group
+ // Config     - unused in this function (kept for API symmetry with
+ //              NvRmPrivAp15GetExternalClockSourceFreq)
+ // ClockState - NV_TRUE to enable the output, NV_FALSE to disable
+ //
+ // When CDEV1 is sourced from PLLA, the PLLA0 attach/detach reference is
+ // updated as well so the PLL stays running while the output is in use.
+ void NvRmPrivAp15EnableExternalClockSource(
+ NvRmDeviceHandle hDevice,
+ const NvU32* Instance,
+ NvU32 Config,
+ NvBool ClockState)
+ {
+ NvU32 MuxCtlShift, MuxCtlSet;
+ // reg/regaddr are referenced implicitly by the CLOCK_ENABLE macro below
+ NvU32 reg;
+ NvU32 regaddr;
+
+ // Unpack which mux control field this entry targets and its value
+ MuxCtlShift = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SHIFT, *Instance);
+ MuxCtlSet = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SET, *Instance);
+
+ if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_SHIFT)
+ {
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_PLLA_OUT)
+ {
+ NvRmPrivExternalClockAttach(
+ hDevice, NvRmClockSource_PllA0, ClockState);
+ }
+ CLOCK_ENABLE(hDevice, MISC_CLK_ENB, CLK_ENB_DEV1_OUT, ClockState);
+ }
+ else if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_SHIFT)
+ {
+ CLOCK_ENABLE(hDevice, MISC_CLK_ENB, CLK_ENB_DEV2_OUT, ClockState);
+ }
+ else if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_SHIFT)
+ {
+ CLOCK_ENABLE(hDevice, MISC_CLK_ENB, CLK_ENB_SUS_OUT, ClockState);
+ }
+ }
+
+ // Returns the frequency (in kHz) currently driven on the external clock
+ // output (CDEV1/CDEV2/CSUS) selected by the packed pin-mux descriptor.
+ //
+ // Returns 0 when the selected source cannot be determined (unknown mux
+ // setting, or VI sensor clock query failure for CSUS).
+ NvU32
+ NvRmPrivAp15GetExternalClockSourceFreq(
+ NvRmDeviceHandle hDevice,
+ const NvU32* Instance,
+ NvU32 Config)
+ {
+ NvU32 MuxCtlShift, MuxCtlSet;
+ NvU32 ClockFreqInKHz = 0;
+
+ MuxCtlShift = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SHIFT, *Instance);
+ MuxCtlSet = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SET, *Instance);
+
+ if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_SHIFT)
+ {
+ // CDEV1 can be fed by PLLA or the main oscillator
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_PLLA_OUT)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllA0);
+
+ else if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_OSC)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+ }
+ else if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_SHIFT)
+ {
+ // CDEV2 can be fed by the AHB bus clock or the main oscillator
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_AHB_CLK)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_Ahb);
+
+ else if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_OSC)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+ }
+ else if (MuxCtlShift == APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_SHIFT)
+ {
+ // CSUS carries the VI sensor clock; query its configured rate
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_VI_SENSOR_CLK)
+ {
+ if (NvRmPowerModuleClockConfig(hDevice, NvRmModuleID_Vi, 0, 0, 0,
+ NULL, 0, &ClockFreqInKHz, NvRmClockConfig_SubConfig) != NvSuccess)
+ {
+ ClockFreqInKHz = 0;
+ }
+ }
+ }
+ return ClockFreqInKHz;
+ }
+
+/* These functions will map from the RM's internal definition of module
+ * instances to the ODM definition. Since the RM is controller-centric,
+ * and the ODM pin mux query is interface-centric, the mapping is not
+ * always one-to-one */
+
+ // Maps an RM (controller-centric) module id to its ODM (interface-centric)
+ // module/instance. Only modules without a generic 1:1 mapping are handled
+ // here; everything else returns NV_FALSE so a default mapping can apply.
+ NvBool NvRmPrivAp15RmModuleToOdmModule(
+     NvRmModuleID RmModule,
+     NvOdmIoModule *OdmModule,
+     NvU32 *OdmInstance,
+     NvU32 *pCnt)
+ {
+     // Strip instance/bar encoding; only the module class matters here
+     if (NVRM_MODULE_ID_MODULE(RmModule) != NvRmPrivModuleID_Mio_Exio)
+         return NV_FALSE;
+
+     *OdmModule = NvOdmIoModule_Mio;
+     *OdmInstance = 0; // there is only one MIO bus on AP15/AP16
+     *pCnt = 1;
+     return NV_TRUE;
+ }
+
+ // Reports interface capabilities for an ODM module instance under a given
+ // pin-mux configuration.
+ //
+ // Module   - ODM I/O module class (Sdio, Hsmmc, Pwm, Nand, Uart handled)
+ // Instance - controller instance index
+ // PinMap   - NvOdm*PinMap_Config* value selecting the pad configuration
+ // pCaps    - must point to the caps structure matching Module
+ //            (e.g. NvRmModuleSdmmcInterfaceCaps for Sdio/Hsmmc)
+ //
+ // Returns NvSuccess when the caps structure was filled,
+ // NvError_NotSupported for unhandled modules/instances/configs.
+ NvError
+ NvRmPrivAp15GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 PinMap,
+ void *pCaps)
+ {
+ NvError err = NvError_NotSupported;
+
+ switch (Module)
+ {
+ case NvOdmIoModule_Sdio:
+ {
+ NvRmModuleSdmmcInterfaceCaps *pSdmmcCaps =
+ (NvRmModuleSdmmcInterfaceCaps *)pCaps;
+ // 8-bit bus only on the specific configs below; 4-bit otherwise
+ if (Instance==0 &&
+ (PinMap == NvOdmSdioPinMap_Config2 ||
+ PinMap == NvOdmSdioPinMap_Config5))
+ pSdmmcCaps->MmcInterfaceWidth = 8;
+ else if (Instance==1 && PinMap==NvOdmSdioPinMap_Config1)
+ pSdmmcCaps->MmcInterfaceWidth = 8;
+ else
+ pSdmmcCaps->MmcInterfaceWidth = 4;
+ err = NvSuccess;
+ break;
+ }
+ case NvOdmIoModule_Hsmmc:
+ {
+ NvRmModuleSdmmcInterfaceCaps *pSdmmcCaps =
+ (NvRmModuleSdmmcInterfaceCaps *)pCaps;
+ // HSMMC defaults to 8-bit; Config2 on instance 0 is 4-bit
+ if (Instance==0 && PinMap==NvOdmHsmmcPinMap_Config2)
+ pSdmmcCaps->MmcInterfaceWidth = 4;
+ else
+ pSdmmcCaps->MmcInterfaceWidth = 8;
+ err = NvSuccess;
+ break;
+ }
+ case NvOdmIoModule_Pwm:
+ {
+ NvRmModulePwmInterfaceCaps *pPwmCaps =
+ (NvRmModulePwmInterfaceCaps *)pCaps;
+ err = NvSuccess;
+ // PwmOutputIdSupported is a bit mask of available PWM outputs
+ if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config1))
+ pPwmCaps->PwmOutputIdSupported = 15;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config2))
+ pPwmCaps->PwmOutputIdSupported = 13;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config3))
+ pPwmCaps->PwmOutputIdSupported = 1;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config4))
+ pPwmCaps->PwmOutputIdSupported = 12;
+ else
+ {
+ pPwmCaps->PwmOutputIdSupported = 0;
+ err = NvError_NotSupported;
+ }
+ break;
+ }
+ case NvOdmIoModule_Nand:
+ {
+ NvRmModuleNandInterfaceCaps *pNandCaps =
+ (NvRmModuleNandInterfaceCaps *)pCaps;
+ // Only one NAND controller exists; start from the common defaults
+ // (combined ready/busy, 8-bit bus) and override per config
+ if (Instance == 0)
+ {
+ pNandCaps->IsCombRbsyMode = NV_TRUE;
+ pNandCaps->NandInterfaceWidth = 8;
+
+ if (PinMap == NvOdmNandPinMap_Config4)
+ pNandCaps->IsCombRbsyMode = NV_FALSE;
+
+ if ((PinMap == NvOdmNandPinMap_Config1) ||
+ (PinMap == NvOdmNandPinMap_Config2))
+ pNandCaps->NandInterfaceWidth = 16;
+
+ err = NvSuccess;
+ }
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ err = NvError_NotSupported;
+ }
+ break;
+ }
+ case NvOdmIoModule_Uart:
+ {
+ NvRmModuleUartInterfaceCaps *pUartCaps =
+ (NvRmModuleUartInterfaceCaps *)pCaps;
+ err = NvSuccess;
+ // Number of wired-out UART lines (data + modem control) per config;
+ // 0 means the config exposes no usable lines
+ if (Instance == 0)
+ {
+ if (PinMap == NvOdmUartPinMap_Config1)
+ pUartCaps->NumberOfInterfaceLines = 8;
+ else if (PinMap == NvOdmUartPinMap_Config3)
+ pUartCaps->NumberOfInterfaceLines = 7;
+ else if ((PinMap == NvOdmUartPinMap_Config2) ||
+ (PinMap == NvOdmUartPinMap_Config4))
+ pUartCaps->NumberOfInterfaceLines = 4;
+ else if ((PinMap == NvOdmUartPinMap_Config5) ||
+ (PinMap == NvOdmUartPinMap_Config6))
+ pUartCaps->NumberOfInterfaceLines = 2;
+ else
+ pUartCaps->NumberOfInterfaceLines = 0;
+ }
+ else if (Instance == 1)
+ {
+ if ((PinMap == NvOdmUartPinMap_Config1) ||
+ (PinMap == NvOdmUartPinMap_Config2))
+ pUartCaps->NumberOfInterfaceLines = 4;
+ else if (PinMap == NvOdmUartPinMap_Config3)
+ pUartCaps->NumberOfInterfaceLines = 2;
+ else
+ pUartCaps->NumberOfInterfaceLines = 0;
+ }
+ else if (Instance == 2)
+ {
+ if (PinMap == NvOdmUartPinMap_Config1)
+ pUartCaps->NumberOfInterfaceLines = 4;
+ else if (PinMap == NvOdmUartPinMap_Config2)
+ pUartCaps->NumberOfInterfaceLines = 2;
+ else
+ pUartCaps->NumberOfInterfaceLines = 0;
+ }
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ err = NvError_NotSupported;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return err;
+ }
+
+
+ // Reads the boot strap value for the requested strap group from the
+ // APB_MISC strapping options register.
+ //
+ // Returns NvError_NotSupported for strap groups other than RamCode;
+ // on success *pStrapValue receives the RAM_CODE field value.
+ NvError
+ NvRmAp15GetStraps(
+     NvRmDeviceHandle hDevice,
+     NvRmStrapGroup StrapGroup,
+     NvU32* pStrapValue)
+ {
+     NvU32 Straps = NV_REGR(
+         hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_STRAPPING_OPT_A_0);
+
+     // Only the RAM_CODE strap group is exposed on AP15
+     if (StrapGroup != NvRmStrapGroup_RamCode)
+         return NvError_NotSupported;
+
+     *pStrapValue = NV_DRF_VAL(APB_MISC_PP, STRAPPING_OPT_A, RAM_CODE, Straps);
+     return NvSuccess;
+ }
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pmc_scratch_map.h b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pmc_scratch_map.h
new file mode 100644
index 000000000000..38cae693e547
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_pmc_scratch_map.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Power Management Controller (PMC) scratch registers fields
+ * definitions</b>
+ *
+ * @b Description: Defines SW-allocated fields in the PMC scratch registers
+ * shared by boot and power management code in RM and OAL.
+ *
+ */
+
+
+#ifndef INCLUDED_AP15RM_PMC_SCRATCH_MAP_H
+#define INCLUDED_AP15RM_PMC_SCRATCH_MAP_H
+
+/*
+ * Scratch registers offsets are part of the HW specification in the below
+ * include file. Scratch registers fields are defined in this header via
+ * bit ranges compatible with nvrm_drf macros.
+ */
+#include "ap15/arapbpm.h"
+
+// Register APBDEV_PMC_SCRATCH0_0 (this is the only scratch register cleared on reset)
+//
+
+ // RM clients combined power state (bits 11:8)
+#define APBDEV_PMC_SCRATCH0_0_RM_PWR_STATE_RANGE 11:8
+#define APBDEV_PMC_SCRATCH0_0_RM_LOAD_TRANSPORT_RANGE 15:12
+#define APBDEV_PMC_SCRATCH0_0_RM_DFS_FLAG_RANGE 27:16
+#define APBDEV_PMC_SCRATCH0_0_UPDATE_MODE_FLAG_RANGE 29:28
+#define APBDEV_PMC_SCRATCH0_0_OAL_RTC_INIT_RANGE 30:30
+#define APBDEV_PMC_SCRATCH0_0_RST_PWR_DET_RANGE 31:31
+
+// Register APBDEV_PMC_SCRATCH20_0, used to store the ODM customer data from the BCT
+#define APBDEV_PMC_SCRATCH20_0_BCT_ODM_DATA_RANGE 31:0
+
+// Register APBDEV_PMC_SCRATCH21_0
+//
+#define APBDEV_PMC_SCRATCH21_0_LP2_TIME_US 31:0
+
+#endif // INCLUDED_AP15RM_PMC_SCRATCH_MAP_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power.c
new file mode 100644
index 000000000000..32ec996f6c9b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: Implements the interface of the NvRM Power.
+ *
+ */
+
+#include "nvrm_power_private.h"
+#include "nvrm_pmu.h"
+#include "nvrm_pmu_private.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_query_discovery.h"
+#include "ap15rm_private.h"
+#include "ap15rm_clocks.h"
+#include "ap15/arapbpm.h"
+#include "ap15/project_relocation_table.h"
+
+// TODO: Always Disable before check-in
+// Module debug: 0=disable, 1=enable
+#define NVRM_ENABLE_PRINTF (0)
+
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_POWER_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_POWER_PRINTF(x)
+#endif
+
+
+#if !NV_OAL
+/*****************************************************************************/
+/*****************************************************************************/
+
+ // Per-group s/w gating switches: 1 = RM may power gate the group,
+ // 0 = gating disabled until the noted voltage-control issues are resolved.
+ #define NV_POWER_GATE_TD (1)
+ // TODO: check PCIE voltage control calls before enabling
+ #define NV_POWER_GATE_PCIE (0)
+ // TODO: check VDE/BSEV/NSEA voltage control calls before enabling
+ #define NV_POWER_GATE_VDE (0)
+ // TODO: check MPE voltage control calls before enabling
+ #define NV_POWER_GATE_MPE (0)
+
+ // Power Group -to- Power Gating Ids mapping; set once by
+ // NvRmPrivPowerGroupControlInit() and read-only thereafter
+ static const NvU32* s_PowerGroupIds = NULL;
+
+/*****************************************************************************/
+
+ // Returns NV_TRUE when the given power group may be gated: the chip map
+ // must provide a valid gating Id (h/w support) and the corresponding
+ // NV_POWER_GATE_* compile-time switch must be set (s/w support).
+ static NvBool IsPowerGateSupported(NvU32 PowerGroup)
+ {
+     // 1st check h/w support capabilities
+     NV_ASSERT(s_PowerGroupIds);
+     if (s_PowerGroupIds[PowerGroup] == NV_POWERGROUP_INVALID)
+         return NV_FALSE;
+
+     // now check s/w support, group by group
+     if (PowerGroup == NV_POWERGROUP_TD)
+         return NV_POWER_GATE_TD;
+     if (PowerGroup == NV_POWERGROUP_PCIE)
+         return NV_POWER_GATE_PCIE;
+     if (PowerGroup == NV_POWERGROUP_VDE)
+         return NV_POWER_GATE_VDE;
+     if (PowerGroup == NV_POWERGROUP_MPE)
+         return NV_POWER_GATE_MPE;
+     return NV_FALSE;
+ }
+
+ // Asserts (Assert == NV_TRUE) or de-asserts module resets for every
+ // module in the given power group. Unknown groups are ignored.
+ static void PowerGroupResetControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PowerGroup,
+ NvBool Assert)
+ {
+ switch (PowerGroup)
+ {
+ case NV_POWERGROUP_TD:
+ NvRmModuleResetWithHold(hRmDeviceHandle, NvRmModuleID_3D, Assert);
+ break;
+ case NV_POWERGROUP_PCIE:
+ // PCIE group covers three reset domains: XCLK (PHY), core, AFI
+ if (Assert) // Keep PHY in reset - let the driver take it out
+ NvRmModuleResetWithHold(
+ hRmDeviceHandle, NvRmPrivModuleID_PcieXclk, Assert);
+ NvRmModuleResetWithHold(
+ hRmDeviceHandle, NvRmPrivModuleID_Pcie, Assert);
+ NvRmModuleResetWithHold(
+ hRmDeviceHandle, NvRmPrivModuleID_Afi, Assert);
+ break;
+ case NV_POWERGROUP_VDE:
+ NvRmModuleResetWithHold(hRmDeviceHandle, NvRmModuleID_Vde, Assert);
+ break;
+ case NV_POWERGROUP_MPE:
+ NvRmModuleResetWithHold(hRmDeviceHandle, NvRmModuleID_Mpe, Assert);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Enables or disables the module clock(s) for the given power group.
+ // Unknown groups are ignored.
+ static void PowerGroupClockControl(
+     NvRmDeviceHandle hRmDeviceHandle,
+     NvU32 PowerGroup,
+     NvBool Enable)
+ {
+     NvU32 ModuleId;
+     ModuleClockState ClockState =
+         Enable ? ModuleClockState_Enable : ModuleClockState_Disable;
+
+     // Map the power group to the single clocked module it contains
+     switch (PowerGroup)
+     {
+         case NV_POWERGROUP_TD:
+             ModuleId = NvRmModuleID_3D;
+             break;
+         case NV_POWERGROUP_PCIE:
+             ModuleId = NvRmPrivModuleID_Pcie;
+             break;
+         case NV_POWERGROUP_VDE:
+             ModuleId = NvRmModuleID_Vde;
+             break;
+         case NV_POWERGROUP_MPE:
+             ModuleId = NvRmModuleID_Mpe;
+             break;
+         default:
+             return; // nothing to clock for this group
+     }
+     NvRmPrivEnableModuleClock(hRmDeviceHandle, ModuleId, ClockState);
+ }
+
+ // Gates (Enable == NV_FALSE) or un-gates (Enable == NV_TRUE) an SoC power
+ // partition via the PMC power-gate toggle, following the required
+ // reset/clock/clamp sequencing. No-op on non-SoC platforms, for "virtual"
+ // groups, for groups without gating support, and when the group is
+ // already in the requested state.
+ //
+ // NOTE(review): both polling loops below spin without a timeout - this
+ // assumes the PMC completes the toggle/unclamp in bounded time; confirm
+ // against the PMC programming sequence for this chip.
+ void
+ NvRmPrivPowerGroupControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PowerGroup,
+ NvBool Enable)
+ {
+ NvU32 reg, Id, Mask, Status;
+ NVRM_POWER_PRINTF(("%s Power Group %d\n",
+ (Enable ? "Enable" : "Disable"), PowerGroup));
+
+ // Do nothing if not SoC platform
+ NV_ASSERT(hRmDeviceHandle);
+ if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
+ return;
+
+ // Do nothing if power gating is not supported for this group
+ if (PowerGroup >= NV_POWERGROUP_MAX)
+ return; // "virtual" groups are always On
+ if (!IsPowerGateSupported(PowerGroup))
+ return;
+
+ // Do nothing if power group is already in requested state
+ NV_ASSERT(s_PowerGroupIds[PowerGroup] != NV_POWERGROUP_INVALID);
+ Id = s_PowerGroupIds[PowerGroup];
+ Mask = (0x1 << Id);
+ Status = Mask & NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_PWRGATE_STATUS_0);
+ if (Enable == (Status != 0x0))
+ return;
+
+ /*
+ * Gating procedure:
+ * - assert resets to all modules in power group
+ * - toggle power gate
+ *
+ * Ungating procedure
+ * - assert resets to all modules in power group (redundant)
+ * - toggle power gate
+ * - enable clocks to all modules in power group
+ * - reset propagation delay
+ * - remove clamping
+ * - de-assert reset to all modules in power group
+ * - disable clocks to all modules in power group
+ *
+ * Special note on toggle timers( shared with OAL which does CPU power
+ * gating): per convention with OAL default settings are never changed.
+ */
+ PowerGroupResetControl(hRmDeviceHandle, PowerGroup, NV_TRUE);
+
+ reg = NV_DRF_DEF(APBDEV_PMC, PWRGATE_TOGGLE, START, ENABLE) | Id;
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_PWRGATE_TOGGLE_0, reg);
+ // Wait for the partition status bit to flip
+ for (;;)
+ {
+ reg = NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_PWRGATE_STATUS_0);
+ if (Status != (reg & Mask))
+ break;
+ }
+ if (Enable)
+ {
+ PowerGroupClockControl(hRmDeviceHandle, PowerGroup, NV_TRUE);
+ NvOsWaitUS(NVRM_RESET_DELAY);
+
+ // PCIE and VDE clamping masks are swapped relatively to
+ // partition Ids (bug 602975)
+ if (PowerGroup == NV_POWERGROUP_PCIE)
+ Mask = 0x1 << s_PowerGroupIds[NV_POWERGROUP_VDE];
+ else if (PowerGroup == NV_POWERGROUP_VDE)
+ Mask = 0x1 << s_PowerGroupIds[NV_POWERGROUP_PCIE];
+
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_REMOVE_CLAMPING_CMD_0, Mask);
+ // Wait until the PMC clears the unclamp command
+ for (;;)
+ {
+ reg = NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_REMOVE_CLAMPING_CMD_0);
+ if (reg == 0)
+ break;
+ }
+ PowerGroupResetControl(hRmDeviceHandle, PowerGroup, NV_FALSE);
+ PowerGroupClockControl(hRmDeviceHandle, PowerGroup, NV_FALSE);
+ }
+ }
+
+ // Reports the power state of a gated partition: NvRmVoltsOff when the
+ // group is currently power gated, NvRmVoltsUnspecified otherwise
+ // (including "virtual" and non-gateable groups, which are always on).
+ NvRmMilliVolts
+ NvRmPrivPowerGroupGetVoltage(
+     NvRmDeviceHandle hRmDeviceHandle,
+     NvU32 PowerGroup)
+ {
+     NvU32 StatusReg;
+
+     // "Virtual" groups beyond the h/w gating range are always On
+     if (PowerGroup >= NV_POWERGROUP_MAX)
+         return NvRmVoltsUnspecified;
+
+     // A group without a gating Id cannot be gated - On by definition
+     if (s_PowerGroupIds[PowerGroup] == NV_POWERGROUP_INVALID)
+         return NvRmVoltsUnspecified;
+
+     // Clear status bit <=> partition is currently power gated
+     StatusReg = NV_REGR(
+         hRmDeviceHandle, NvRmModuleID_Pmif, 0, APBDEV_PMC_PWRGATE_STATUS_0);
+     if ((StatusReg & (0x1 << s_PowerGroupIds[PowerGroup])) == 0x0)
+         return NvRmVoltsOff;
+
+     return NvRmVoltsUnspecified;
+ }
+
+ // Initializes the chip-specific power-group map and gates every
+ // supported partition so clients must explicitly power them on.
+ //
+ // Fix: on an unsupported chip Id the original fell through with 'Size'
+ // uninitialized and s_PowerGroupIds left NULL (NV_ASSERT is compiled out
+ // in release builds), leading to a NULL dereference in the gating loop.
+ // Initialize Size and bail out early instead.
+ void NvRmPrivPowerGroupControlInit(NvRmDeviceHandle hRmDeviceHandle)
+ {
+     NvU32 i;
+     NvU32 Size = 0; // filled by the chip-specific table init below
+
+     // Init chip specific power group map
+     if ((hRmDeviceHandle->ChipId.Id == 0x15) ||
+         (hRmDeviceHandle->ChipId.Id == 0x16))
+     {
+         NvRmPrivAp15PowerGroupTableInit(&s_PowerGroupIds, &Size);
+     }
+     else if (hRmDeviceHandle->ChipId.Id == 0x20)
+     {
+         NvRmPrivAp20PowerGroupTableInit(&s_PowerGroupIds, &Size);
+     }
+     else
+     {
+         NV_ASSERT(!"Unsupported chip ID");
+         // Leave gating uninitialized rather than dereference a NULL map
+         // in the loop below (release builds compile out the assert)
+         return;
+     }
+     NV_ASSERT(Size == NV_POWERGROUP_MAX);
+
+     // Power gate supported partitions
+     for (i = 0; i < NV_POWERGROUP_MAX; i++)
+         NvRmPrivPowerGroupControl(hRmDeviceHandle, i, NV_FALSE);
+ }
+
+#endif // !NV_OAL
+/*****************************************************************************/
+/*****************************************************************************/
+
+ // When set, NvRmPrivIoPowerControl() actually writes the PMC NO_IOPOWER
+ // register; when clear the write is compiled out (debug aid).
+ #define NV_NO_IOPOWER_CONTROL (1)
+ // Marks a map entry whose PMU rail address has not been resolved
+ #define NV_RAIL_ADDR_INVALID ((NvU32)-1)
+
+ // Describes one IO power rail: which PMU rail feeds it, and its bit
+ // positions in the PMC NO_IOPOWER / PWR_DET registers.
+ typedef struct IoPowerDetectInfoRec
+ {
+ // SoC Power rail GUID
+ NvU64 PowerRailId;
+
+ // IO Power rail disable bit mask
+ NvU32 DisableRailMask;
+
+ // IO Power Detect cell enable bit mask
+ NvU32 EnablePwrDetMask;
+
+ // PMU Rail Address
+ NvU32 PmuRailAddress;
+
+ } IoPowerDetectInfo;
+
+ // IO rail map; PmuRailAddress fields (last column) are filled in at
+ // runtime by NvRmPrivIoPowerControlInit() from ODM peripheral data
+ static IoPowerDetectInfo s_IoPowerDetectMap[] =
+ {
+ {NV_VDD_SYS_ODM_ID, (0x1 << 0), (0x1 << 0), 0},
+ {NV_VDD_NAND_ODM_ID, (0x1 << 1), (0x1 << 1), 0},
+ {NV_VDD_UART_ODM_ID, (0x1 << 2), (0x1 << 2), 0},
+ {NV_VDD_BB_ODM_ID, (0x1 << 3), (0x1 << 3), 0},
+
+ {NV_VDD_VI_ODM_ID, (0x1 << 4), (0x1 << 4), 0},
+ {NV_VDD_AUD_ODM_ID, (0x1 << 5), (0x1 << 5), 0},
+ {NV_VDD_LCD_ODM_ID, (0x1 << 6), (0x1 << 6), 0},
+ {NV_VDD_DDR_ODM_ID, (0x1 << 7), (0x1 << 7), 0},
+
+ {NV_VDD_SDIO_ODM_ID, (0x1 << 8), (0x1 << 8), 0},
+ {NV_VDD_MIPI_ODM_ID, (0x1 << 9), (0x0), 0} // No detect cell
+ };
+
+ // Collects, for a given PMU rail address, the combined power-detect cell
+ // enable mask and the combined IO-rail disable mask of every IO power
+ // group fed from that rail (one PMU rail may power several IO groups).
+ // Both outputs are 0 when the rail is not present in the map.
+ static void IoPowerMapRail(
+     NvU32 PmuRailAddress,
+     NvU32* pIoPwrDetectMask,
+     NvU32* pNoIoPwrMask)
+ {
+     NvU32 DetectMask = 0;
+     NvU32 DisableMask = 0;
+     NvU32 i;
+
+     for (i = 0; i < NV_ARRAY_SIZE(s_IoPowerDetectMap); i++)
+     {
+         const IoPowerDetectInfo* pEntry = &s_IoPowerDetectMap[i];
+         if (pEntry->PmuRailAddress != PmuRailAddress)
+             continue;
+         DetectMask |= pEntry->EnablePwrDetMask;
+         DisableMask |= pEntry->DisableRailMask;
+     }
+     *pIoPwrDetectMask = DetectMask;
+     *pNoIoPwrMask = DisableMask;
+ }
+
+ // Resolves PMU rail addresses for the IO power-detect map, latches the
+ // detect results for already-powered rails, and disables IO pads on
+ // rails that are currently off. On non-SoC platforms the map is
+ // invalidated instead.
+ //
+ // NOTE(review): in release builds (NV_ASSERT compiled out) a missing ODM
+ // rail GUID would dereference a NULL pPmuRail below - confirm every
+ // NV_VDD_*_ODM_ID in the map is guaranteed by the ODM query.
+ void NvRmPrivIoPowerControlInit(NvRmDeviceHandle hRmDeviceHandle)
+ {
+ NvU32 i, v;
+ NvU32 NoIoPwrMask = 0;
+ const NvOdmPeripheralConnectivity* pPmuRail = NULL;
+
+ if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
+ {
+ // Invalidate IO Power detect map if not SoC platform
+ for (i = 0; i < NV_ARRAY_SIZE(s_IoPowerDetectMap); i++)
+ s_IoPowerDetectMap[i].PmuRailAddress = NV_RAIL_ADDR_INVALID;
+ return;
+ }
+
+ for (i = 0; i < NV_ARRAY_SIZE(s_IoPowerDetectMap); i++)
+ {
+ // Fill in PMU rail addresses in IO Power detect map
+ pPmuRail = NvOdmPeripheralGetGuid(s_IoPowerDetectMap[i].PowerRailId);
+ NV_ASSERT(pPmuRail && pPmuRail->NumAddress);
+ s_IoPowerDetectMap[i].PmuRailAddress = pPmuRail->AddressList[0].Address;
+
+ // Find all unpowered rails
+ v = NvRmPrivPmuRailGetVoltage(
+ hRmDeviceHandle, s_IoPowerDetectMap[i].PowerRailId);
+ if (v == ODM_VOLTAGE_OFF)
+ NoIoPwrMask |= s_IoPowerDetectMap[i].DisableRailMask;
+ }
+
+ // Latch already powered IO rails
+ NvRmPrivIoPowerDetectLatch(hRmDeviceHandle);
+
+ // Disable IO pads for unpowered rails
+ if (NoIoPwrMask)
+ NvRmPrivIoPowerControl(hRmDeviceHandle, NoIoPwrMask, NV_FALSE);
+ }
+
+ // Begins the IO power-detect sequence (steps 1-3): enables the requested
+ // detect cells and primes their latches to a safe high value, with
+ // chip-revision-specific handling. The rail transition (step 4) happens
+ // externally, and NvRmPrivIoPowerDetectLatch() finishes steps 5-7.
+ void NvRmPrivIoPowerDetectStart(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PwrDetMask)
+ {
+ // (1) Enable specified power detect cell
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_PWR_DET_0, PwrDetMask);
+
+ // (2-3) Set power detect latches for enabled cells to safe "1" (high) value
+ if ((hRmDeviceHandle->ChipId.Id == 0x15) ||
+ (hRmDeviceHandle->ChipId.Id == 0x16))
+ {
+ // On AP15/AP16 set/clear reset bit in PMC scratch0
+ NvRmPrivAp15IoPowerDetectReset(hRmDeviceHandle);
+
+ // For AP15 A01 chip the above reset does nothing, therefore
+ // need to set latch "pass-thru" before transition
+ if ((hRmDeviceHandle->ChipId.Id == 0x15) &&
+ (hRmDeviceHandle->ChipId.Major == 0x01) &&
+ (hRmDeviceHandle->ChipId.Minor == 0x01))
+ {
+ NvOsWaitUS(NVRM_PWR_DET_DELAY_US);
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_PWR_DET_LATCH_0, 1);
+ }
+ }
+ else
+ {
+ // On AP20+ reset high values directly
+ NvRmPrivAp20IoPowerDetectReset(hRmDeviceHandle);
+ }
+ }
+//
+// (4) Power rail OFF -> ON transition and stabilization
+//
+void NvRmPrivIoPowerDetectLatch(NvRmDeviceHandle hRmDeviceHandle)
+{
+// (5) Set latch "pass-thru"
+// (6) Latch results
+// (7) Disable all power detect cells
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_PWR_DET_LATCH_0, 1);
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_PWR_DET_LATCH_0, 0);
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_PWR_DET_0, 0);
+}
+
+ // Enables (clears NO_IOPOWER bits) or disables (sets NO_IOPOWER bits)
+ // the IO pads selected by NoIoPwrMask via the PMC NO_IOPOWER register.
+ //
+ // NOTE(review): despite its name, NV_NO_IOPOWER_CONTROL==1 ENABLES the
+ // register write below; with it 0 this function computes but never
+ // applies the new value - confirm the intended polarity of the switch.
+ void NvRmPrivIoPowerControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 NoIoPwrMask,
+ NvBool Enable)
+ {
+ NvU32 reg = NV_REGR(
+ hRmDeviceHandle, NvRmModuleID_Pmif, 0, APBDEV_PMC_NO_IOPOWER_0);
+ reg = Enable ? (reg & (~NoIoPwrMask)) : (reg | NoIoPwrMask);
+
+ #if NV_NO_IOPOWER_CONTROL
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, APBDEV_PMC_NO_IOPOWER_0, reg);
+ #endif
+ }
+
+ // Prepares IO pads/detect cells for a PMU rail power transition.
+ //
+ // Enable == NV_TRUE  (rail is about to go Off->On): starts the power
+ //   detect cells and leaves both masks set so the caller can latch the
+ //   results and re-enable pads after the transition completes.
+ // Enable == NV_FALSE (rail is about to go On->Off): disables the pads
+ //   now and clears both masks - nothing to do after the transition.
+ //
+ // Both masks are zeroed (and the function is a no-op) for rails not in
+ // the IO power-detect map.
+ void
+ NvRmPrivSetSocRailPowerState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PmuRailAddress,
+ NvBool Enable,
+ NvU32* pIoPwrDetectMask,
+ NvU32* pNoIoPwrMask)
+ {
+ IoPowerMapRail(PmuRailAddress, pIoPwrDetectMask, pNoIoPwrMask);
+ if ((*pIoPwrDetectMask == 0) && (*pNoIoPwrMask == 0))
+ return; // Exit if not mapped rail
+
+ if (Enable)
+ {
+ // On/Off transition: activate power detect cells and keep control
+ // masks so that the results can be latched and IO pads enabled after
+ // the transition is completed
+ if (*pIoPwrDetectMask != 0)
+ NvRmPrivIoPowerDetectStart(hRmDeviceHandle, *pIoPwrDetectMask);
+ }
+ else
+ {
+ // Off/On transition: disable IO pads, and clear control masks,
+ // as no action is required after the transition is completed
+ if (*pNoIoPwrMask != 0)
+ NvRmPrivIoPowerControl(hRmDeviceHandle, *pNoIoPwrMask, NV_FALSE);
+ *pIoPwrDetectMask = *pNoIoPwrMask = 0;
+ }
+ }
+
+/*****************************************************************************/
+
+ // Brings core, RTC and (if dedicated) CPU rails to their nominal levels
+ // during PMU init, and turns on the always-on system/DDR IO rails. Boot
+ // voltages are verified to be within one safe step (NVRM_SAFE_VOLTAGE_
+ // STEP_MV) of the target before any change; otherwise the function
+ // asserts and bails out without touching the rails. No-op off-SoC.
+ void NvRmPrivCoreVoltageInit(NvRmDeviceHandle hRmDevice)
+ {
+ NvU32 CoreRailAddress, RtcRailAddress, CpuRailAddress;
+ const NvOdmPeripheralConnectivity* pPmuRail;
+ NvRmMilliVolts CurrentCoreMv = 0;
+ NvRmMilliVolts CurrentRtcMv = 0;
+ NvRmMilliVolts NominalCoreMv = NvRmPrivGetNominalMV(hRmDevice);
+
+ NV_ASSERT(hRmDevice);
+
+ if (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc)
+ {
+ return;
+ }
+
+ // Resolve PMU addresses of the core and RTC rails from ODM data
+ pPmuRail = NvOdmPeripheralGetGuid(NV_VDD_CORE_ODM_ID);
+ NV_ASSERT(pPmuRail);
+ NV_ASSERT(pPmuRail->NumAddress);
+ CoreRailAddress = pPmuRail->AddressList[0].Address;
+
+ pPmuRail = NvOdmPeripheralGetGuid(NV_VDD_RTC_ODM_ID);
+ NV_ASSERT(pPmuRail);
+ NV_ASSERT(pPmuRail->NumAddress);
+ RtcRailAddress = pPmuRail->AddressList[0].Address;
+
+ // This function is called during PMU initialization when current (= boot)
+ // core voltage is expected to be within one safe step from nominal, and
+ // RTC voltage must be within one safe step from the core. Set nominal
+ // voltage (bump PMU ref count), if the above conditions are true.
+ NvRmPmuGetVoltage(hRmDevice, CoreRailAddress, &CurrentCoreMv);
+ NvRmPmuGetVoltage(hRmDevice, RtcRailAddress, &CurrentRtcMv);
+ if((CurrentCoreMv > (NominalCoreMv + NVRM_SAFE_VOLTAGE_STEP_MV)) ||
+ ((CurrentCoreMv + NVRM_SAFE_VOLTAGE_STEP_MV) < NominalCoreMv))
+ {
+ NV_ASSERT(!"Unexpected initial core voltage");
+ return;
+ }
+ if((CurrentRtcMv > (CurrentCoreMv + NVRM_SAFE_VOLTAGE_STEP_MV)) ||
+ ((CurrentRtcMv + NVRM_SAFE_VOLTAGE_STEP_MV) < CurrentCoreMv))
+ {
+ NV_ASSERT(!"Unexpected initial RTC voltage");
+ return;
+ }
+ NvRmPmuSetVoltage(hRmDevice, RtcRailAddress, NominalCoreMv, NULL);
+ NvRmPmuSetVoltage(hRmDevice, CoreRailAddress, NominalCoreMv, NULL);
+
+ // If the platform has dedicated CPU voltage rail, make sure it is set to
+ // nominal level first. Similar to the core, CPU boot voltage is expected
+ // to be within one safe step from nominal.
+ if (NvRmPrivIsCpuRailDedicated(hRmDevice))
+ {
+ NvRmMilliVolts CurrentCpuMv = 0;
+ NvRmMilliVolts NominalCpuMv = NvRmPrivModuleVscaleGetMV(
+ hRmDevice, NvRmModuleID_Cpu, NvRmFreqMaximum);
+
+ pPmuRail = NvOdmPeripheralGetGuid(NV_VDD_CPU_ODM_ID);
+ NV_ASSERT(pPmuRail);
+ NV_ASSERT(pPmuRail->NumAddress);
+ CpuRailAddress = pPmuRail->AddressList[0].Address;
+
+ NvRmPmuGetVoltage(hRmDevice, CpuRailAddress, &CurrentCpuMv);
+ if((CurrentCpuMv > (NominalCpuMv + NVRM_SAFE_VOLTAGE_STEP_MV)) ||
+ ((CurrentCpuMv + NVRM_SAFE_VOLTAGE_STEP_MV) < NominalCpuMv))
+ {
+ NV_ASSERT(!"Unexpected initial CPU voltage");
+ return;
+ }
+ NvRmPmuSetVoltage(hRmDevice, CpuRailAddress, NominalCpuMv, NULL);
+ }
+
+ // Always On System I/O, DDR IO and RX DDR (if exist) - set nominal,
+ // bump ref count
+ NvRmPrivPmuRailControl(hRmDevice, NV_VDD_SYS_ODM_ID, NV_TRUE);
+ NvRmPrivPmuRailControl(hRmDevice, NV_VDD_DDR_ODM_ID, NV_TRUE);
+ if (NvOdmPeripheralGetGuid(NV_VDD_DDR_RX_ODM_ID))
+ NvRmPrivPmuRailControl(hRmDevice, NV_VDD_DDR_RX_ODM_ID, NV_TRUE);
+ }
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.c
new file mode 100644
index 000000000000..9f19d6c9a12a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Dynamic Frequency Scaling manager </b>
+ *
+ * @b Description: Implements NvRM Dynamic Frequency Scaling (DFS)
+ * manager for SOC-wide clock domains.
+ *
+ */
+
+#include "nvrm_power_dfs.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_pmu.h"
+#include "ap15rm_power_dfs.h"
+#include "ap15/arstat_mon.h"
+#include "ap15/arvde_mon.h"
+#include "ap15/aremc.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/arapb_misc.h"
+#include "ap15/artimerus.h"
+
+/*****************************************************************************/
+
+// Register access macros for System Statistic module.
+// Each macro adds the register's byte offset (from the generated ar*.h
+// headers) to the module's virtual base address.
+#define NV_SYSTAT_REGR(pSystatRegs, reg) \
+ NV_READ32((((NvU32)(pSystatRegs)) + STAT_MON_##reg##_0))
+#define NV_SYSTAT_REGW(pSystatRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pSystatRegs)) + STAT_MON_##reg##_0), (val))
+
+// Register access macros for VDE module
+#define NV_VDE_REGR(pVdeRegs, reg) \
+ NV_READ32((((NvU32)(pVdeRegs)) + ARVDE_PPB_##reg##_0))
+#define NV_VDE_REGW(pVdeRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pVdeRegs)) + ARVDE_PPB_##reg##_0), (val))
+
+// Register access macros for EMC module
+#define NV_EMC_REGR(pEmcRegs, reg) \
+ NV_READ32((((NvU32)(pEmcRegs)) + EMC_##reg##_0))
+#define NV_EMC_REGW(pEmcRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pEmcRegs)) + EMC_##reg##_0), (val))
+
+// Register access macros for CAR module
+#define NV_CAR_REGR(pCarRegs, reg) \
+ NV_READ32((((NvU32)(pCarRegs)) + CLK_RST_CONTROLLER_##reg##_0))
+#define NV_CAR_REGW(pCarRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pCarRegs)) + CLK_RST_CONTROLLER_##reg##_0), (val))
+
+// Register access macros for APB MISC module
+#define NV_APB_REGR(pApbRegs, reg) \
+ NV_READ32((((NvU32)(pApbRegs)) + APB_MISC_##reg##_0))
+#define NV_APB_REGW(pApbRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pApbRegs)) + APB_MISC_##reg##_0), (val))
+
+/*****************************************************************************/
+// SYSTEM STATISTIC MODULE INTERFACES
+/*****************************************************************************/
+
+/**
+ * Brings all System Statistic monitors to a known disabled state:
+ * enables the module clock, resets the module, then clears every monitor
+ * control register (writing "1" to INT clears a pending interrupt flag).
+ * On success the module clock is left enabled; on clock-control failure
+ * the error is returned and no registers are touched.
+ */
+NvError NvRmPrivAp15SystatMonitorsInit(NvRmDfs* pDfs)
+{
+ NvError error;
+ NvU32 RegValue;
+ void* pSystatRegs = pDfs->Modules[NvRmDfsModuleId_Systat].pBaseReg;
+ NV_ASSERT(pSystatRegs);
+
+ /*
+ * System Statistic Monitor module belongs to DFS, therefore it is full
+ * initialization: Enable Clock => Reset => clear all control registers
+ * including interrupt status flags (cleared by writing "1"). Note that
+ * all monitors - used, or not used by DFS - are initialized. (The VPIPE
+ * monitor in this module does not provide necessary data for DFS; the
+ * VDE idle monitor is employed for video-pipe domain control)
+ */
+ error = NvRmPowerModuleClockControl(
+ pDfs->hRm, NvRmModuleID_SysStatMonitor, pDfs->PowerClientId, NV_TRUE);
+ if (error != NvSuccess)
+ {
+ return error;
+ }
+ NvRmModuleReset(pDfs->hRm, NvRmModuleID_SysStatMonitor);
+
+ RegValue = NV_DRF_NUM(STAT_MON, CPU_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, CPU_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, COP_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, COP_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, CACHE2_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, CACHE2_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, AHB_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, AHB_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, APB_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, APB_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, VPIPE_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, VPIPE_MON_CTRL, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, SMP_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, SMP_MON_CTRL, RegValue);
+
+ return NvSuccess;
+}
+
+void NvRmPrivAp15SystatMonitorsDeinit(NvRmDfs* pDfs)
+{
+ NvU32 i;
+
+ /*
+  * Quiesce the monitors by re-running initialization, then drop the
+  * module clock twice: once to balance the enable done by the init call
+  * above, and once to balance the enable taken when the monitors were
+  * first initialized.
+  */
+ (void)NvRmPrivAp15SystatMonitorsInit(pDfs);
+ for (i = 0; i < 2; i++)
+ {
+     (void)NvRmPowerModuleClockControl(
+         pDfs->hRm, NvRmModuleID_SysStatMonitor,
+         pDfs->PowerClientId, NV_FALSE);
+ }
+}
+
+/**
+ * Arms the COP, AHB, APB and CPU statistic monitors for the next DFS
+ * sample interval. Only the CPU monitor has its interrupt enabled - it
+ * is the one that drives the DFS sampling interrupt.
+ *
+ * @param pDfs        DFS state (provides monitor base address and RM handle).
+ * @param pDfsKHz     Current domain frequencies (unused here; kept for
+ *                    interface symmetry with the other *Start routines).
+ * @param IntervalMs  Next sampling interval in ms.
+ */
+void
+NvRmPrivAp15SystatMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs)
+{
+ NvU32 RegValue;
+ NvU32 msec = IntervalMs - 1; // systat monitors use (n+1) ms counters
+ // NOTE(review): assumes IntervalMs >= 1; IntervalMs == 0 would
+ // underflow msec to 0xFFFFFFFF - confirm callers never pass 0.
+ void* pSystatRegs = pDfs->Modules[NvRmDfsModuleId_Systat].pBaseReg;
+
+ /*
+ * Start AVP (COP) monitor for the next sample period. Interrupt is
+ * cleared (by writing "1") and left disabled. Monitor is counting
+ * System clock cycles while AVP is halted by flow controller. Note
+ * that AVP monitor is counting System (not AVP!) clock cycles
+ */
+ RegValue = NV_DRF_DEF(STAT_MON, COP_MON_CTRL, ENB, ENABLE) |
+ NV_DRF_NUM(STAT_MON, COP_MON_CTRL, INT, 1) |
+ NV_DRF_NUM(STAT_MON, COP_MON_CTRL, SAMPLE_PERIOD, msec);
+ NV_SYSTAT_REGW(pSystatRegs, COP_MON_CTRL, RegValue);
+
+ /*
+ * Start AHB monitor for the next sample period. Interrupt is cleared
+ * (by writing "1") and left disabled. Monitor is counting AHB clock
+ * cycles while there is no data transfer on AHB initiated by any master
+ */
+ RegValue = NV_DRF_DEF(STAT_MON, AHB_MON_CTRL, ENB, ENABLE) |
+ NV_DRF_NUM(STAT_MON, AHB_MON_CTRL, INT, 1) |
+ NV_DRF_DEF(STAT_MON, AHB_MON_CTRL, MST_NUMBER, DEFAULT_MASK) |
+ NV_DRF_NUM(STAT_MON, AHB_MON_CTRL, SAMPLE_PERIOD, msec);
+ NV_SYSTAT_REGW(pSystatRegs, AHB_MON_CTRL, RegValue);
+
+ /*
+ * Start APB monitor for the next sample period. Interrupt is cleared
+ * (by writing "1") and left disabled. Monitor is counting APB clock
+ * cycles while there is no data transfer on APB targeted to any slave
+ */
+ RegValue = NV_DRF_DEF(STAT_MON, APB_MON_CTRL, ENB, ENABLE) |
+ NV_DRF_NUM(STAT_MON, APB_MON_CTRL, INT, 1) |
+ NV_DRF_DEF(STAT_MON, APB_MON_CTRL, SLV_NUMBER, DEFAULT_MASK) |
+ NV_DRF_NUM(STAT_MON, APB_MON_CTRL, SAMPLE_PERIOD, msec);
+ NV_SYSTAT_REGW(pSystatRegs, APB_MON_CTRL, RegValue);
+
+ /*
+ * Start CPU monitor for the next sample period. Interrupt is cleared
+ * (by writing "1") and enabled, since CPU monitor is used to generate
+ * DFS interrupt. Monitor is counting System clock cycles while CPU is
+ * halted by flow controller. Note: CPU monitor is counting System (not
+ * CPU!) clock cycles
+ */
+ RegValue = NV_DRF_DEF(STAT_MON, CPU_MON_CTRL, ENB, ENABLE) |
+ NV_DRF_DEF(STAT_MON, CPU_MON_CTRL, INT_EN, ENABLE) |
+ NV_DRF_NUM(STAT_MON, CPU_MON_CTRL, INT, 1) |
+ NV_DRF_NUM(STAT_MON, CPU_MON_CTRL, SAMPLE_PERIOD, msec);
+ NV_SYSTAT_REGW(pSystatRegs, CPU_MON_CTRL, RegValue);
+
+ // Initialize LP2 time storage (WAR for bug 429585)
+ NvRmPrivSetLp2TimeUS(pDfs->hRm, 0);
+}
+
+/**
+ * Stops the COP, AHB, APB and CPU statistic monitors and converts their
+ * readings into idle-cycle counts in @a pIdleData. COP/CPU monitors count
+ * System clock cycles, so their readings are rescaled to the AVP/CPU
+ * domain frequencies. LP2 (deep sleep) time is folded into the CPU idle
+ * count as a workaround for bug 429585.
+ *
+ * @param pDfs      DFS state (monitor base address, RM handle).
+ * @param pDfsKHz   Current domain frequencies used for rescaling.
+ * @param pIdleData Output: idle cycle counts per clock domain plus the
+ *                  actual sample interval and LP2 time.
+ */
+void
+NvRmPrivAp15SystatMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData)
+{
+ NvU32 RegValue;
+ NvU64 temp;
+ void* pSystatRegs = pDfs->Modules[NvRmDfsModuleId_Systat].pBaseReg;
+ NvBool NoLp2Offset = pDfs->Modules[NvRmDfsModuleId_Systat].Offset !=
+ NVRM_CPU_IDLE_LP2_OFFSET;
+
+ /*
+ * Read AVP (COP) monitor: disable it (=stop, the readings are preserved)
+ * and clear interrupt status bit (by writing "1"). Then, read AVP idle
+ * count. Since AVP monitor is counting System (not AVP!) clock cycles,
+ * the monitor reading is converted to AVP clocks before storing it in
+ * idle data packet.
+ */
+ RegValue = NV_DRF_NUM(STAT_MON, COP_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, COP_MON_CTRL, RegValue);
+
+ RegValue = NV_SYSTAT_REGR(pSystatRegs, COP_MON_STATUS);
+ RegValue = NV_DRF_VAL(STAT_MON, COP_MON_STATUS, COUNT, RegValue);
+
+ temp = ((NvU64)RegValue * pDfsKHz->Domains[NvRmDfsClockId_Avp]);
+ temp = NvDiv64(temp, pDfsKHz->Domains[NvRmDfsClockId_System]);
+
+ pIdleData->Readings[NvRmDfsClockId_Avp] = (NvU32)temp;
+
+ /*
+ * Read AHB monitor: disable it (=stop, the readings are preserved) and
+ * clear interrupt status bit (by writing "1"). Then, read AHB idle count
+ * (in AHB clock cycles) and store it in idle data packet.
+ */
+ RegValue = NV_DRF_NUM(STAT_MON, AHB_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, AHB_MON_CTRL, RegValue);
+
+ RegValue = NV_SYSTAT_REGR(pSystatRegs, AHB_MON_STATUS);
+ pIdleData->Readings[NvRmDfsClockId_Ahb] =
+ NV_DRF_VAL(STAT_MON, AHB_MON_STATUS, COUNT, RegValue);
+
+ /*
+ * Read APB monitor: disable it (=stop, the readings are preserved) and
+ * clear interrupt status bit (by writing "1"). Then, read APB idle count
+ * (in APB clock cycles) and store it in idle data packet.
+ */
+ RegValue = NV_DRF_NUM(STAT_MON, APB_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, APB_MON_CTRL, RegValue);
+
+ RegValue = NV_SYSTAT_REGR(pSystatRegs, APB_MON_STATUS);
+ pIdleData->Readings[NvRmDfsClockId_Apb] =
+ NV_DRF_VAL(STAT_MON, APB_MON_STATUS, COUNT, RegValue);
+
+ /*
+ * Read CPU monitor: read current sampling period and store it in idle
+ * data packet. Disable monitor (=stop, the readings are preserved) and
+ * clear interrupt status bit (by writing "1"). Read CPU idle count.
+ * Since CPU monitor is counting System (not CPU!) cycles, the monitor
+ * readings are converted to CPU clocks before storing in idle data packet
+ */
+ RegValue = NV_SYSTAT_REGR(pSystatRegs, CPU_MON_CTRL);
+ pIdleData->CurrentIntervalMs = 1 + // systat monitors use (n+1) ms counters
+ NV_DRF_VAL(STAT_MON, CPU_MON_CTRL, SAMPLE_PERIOD, RegValue);
+
+ RegValue = NV_DRF_NUM(STAT_MON, CPU_MON_CTRL, INT, 1);
+ NV_SYSTAT_REGW(pSystatRegs, CPU_MON_CTRL, RegValue);
+
+ // Add LP2 time to idle measurements (WAR for bug 429585)
+ // For logging only - use 2^10 ~ 1000, and round up
+ RegValue = NvRmPrivGetLp2TimeUS(pDfs->hRm);
+ pIdleData->Lp2TimeMs = (RegValue + (0x1 << 10) - 1) >> 10;
+ if ((RegValue == 0) || NoLp2Offset)
+ {
+ pIdleData->Readings[NvRmDfsClockId_Cpu] = 0;
+ }
+ else if (RegValue < NVRM_DFS_MAX_SAMPLE_MS * 1000)
+ { // (US * KHz) / 1000 ~ (US * 131 * KHz) / (128 * 1024)
+ // Promote RegValue to 64-bit BEFORE multiplying: the original
+ // 32-bit product (RegValue * 131) can wrap for long LP2 residence
+ // times, corrupting the CPU idle estimate.
+ pIdleData->Readings[NvRmDfsClockId_Cpu] =
+ (NvU32)(((NvU64)RegValue * 131 *
+ pDfsKHz->Domains[NvRmDfsClockId_Cpu]) >> 17);
+ }
+ else
+ {
+ pIdleData->Readings[NvRmDfsClockId_Cpu] = 0xFFFFFFFFUL;
+ return; // the entire sample is idle, anyway
+ }
+ RegValue = NV_SYSTAT_REGR(pSystatRegs, CPU_MON_STATUS);
+ RegValue = NV_DRF_VAL(STAT_MON, CPU_MON_STATUS, COUNT, RegValue);
+
+ temp = ((NvU64)RegValue * pDfsKHz->Domains[NvRmDfsClockId_Cpu]);
+ temp = NvDiv64(temp, pDfsKHz->Domains[NvRmDfsClockId_System]);
+
+ pIdleData->Readings[NvRmDfsClockId_Cpu] += (NvU32)temp;
+}
+
+/*****************************************************************************/
+// VDE MODULE INTERFACES
+/*****************************************************************************/
+
+/**
+ * Initializes the VDE idle monitor used for the video-pipe domain.
+ * Only the monitor control register is cleared (INT_STATUS is cleared
+ * by writing "1"); the rest of the VDE module is untouched.
+ *
+ * @return NvSuccess always (kept NvError for interface symmetry with
+ *         the other monitor init routines).
+ */
+NvError NvRmPrivAp15VdeMonitorsInit(NvRmDfs* pDfs)
+{
+ NvU32 RegValue;
+ void* pVdeRegs = pDfs->Modules[NvRmDfsModuleId_Vde].pBaseReg;
+ NV_ASSERT(pVdeRegs);
+
+ /*
+ * Video pipe monitor belongs to VDE module - just clear monitor control
+ * register including interrupt status bit, and do not touch anything
+ * else in VDE
+ */
+ RegValue = NV_DRF_NUM(ARVDE_PPB, IDLE_MON, INT_STATUS, 1);
+ NV_VDE_REGW(pVdeRegs, IDLE_MON, RegValue);
+
+ return NvSuccess;
+}
+
+void NvRmPrivAp15VdeMonitorsDeinit(NvRmDfs* pDfs)
+{
+ /* Re-initialization clears the monitor control register, which also
+  * stops the idle monitor - there is nothing else to tear down. */
+ (void)NvRmPrivAp15VdeMonitorsInit(pDfs);
+}
+
+/**
+ * Arms the VDE idle monitor for the next DFS sample interval. The VDE
+ * monitor's period is programmed in v-clock cycles, not milliseconds.
+ *
+ * @param pDfs       DFS state (provides VDE monitor base address).
+ * @param pDfsKHz    Current domain frequencies (v-pipe KHz is used to
+ *                   convert the interval into clock cycles).
+ * @param IntervalMs Next sampling interval in ms.
+ */
+void
+NvRmPrivAp15VdeMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs)
+{
+ NvU32 RegValue;
+ // KHz * ms = cycles. NOTE(review): 32-bit product - assumes
+ // IntervalMs * VpipeKHz stays below 2^32; confirm DFS interval limits.
+ NvU32 cycles = IntervalMs * pDfsKHz->Domains[NvRmDfsClockId_Vpipe];
+ void* pVdeRegs = pDfs->Modules[NvRmDfsModuleId_Vde].pBaseReg;
+
+ /*
+ * Start VDE vpipe monitor for the next sample period. Interrupt status bit
+ * is cleared by writing "1" (it is not connected to interrupt controller,
+ * just "count end" status bit). Monitor is counting v-clock cycles while
+ * all VDE submodules are idle. The sample period is specified in v-clock
+ * cycles rather than in time units.
+ */
+ RegValue = NV_DRF_NUM(ARVDE_PPB, IDLE_MON, ENB, 1) |
+ NV_DRF_NUM(ARVDE_PPB, IDLE_MON, INT_STATUS, 1) |
+ NV_DRF_NUM(ARVDE_PPB, IDLE_MON, SAMPLE_PERIOD, cycles);
+ NV_VDE_REGW(pVdeRegs, IDLE_MON, RegValue);
+}
+
+/**
+ * Stops the VDE idle monitor and stores the v-pipe idle count (in
+ * v-clock cycles) in @a pIdleData. If the VDE clock is gated, the domain
+ * is reported as fully idle ((NvU32)-1 sentinel = "entire sample idle").
+ */
+void
+NvRmPrivAp15VdeMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData)
+{
+ // CAR module virtual base address, resolved lazily on first call.
+ // NOTE(review): unsynchronized static cache - benign only if all
+ // callers are serialized on the DFS path; confirm.
+ static void* s_pCarBaseReg = NULL;
+ NvU32 RegValue;
+ void* pVdeRegs = pDfs->Modules[NvRmDfsModuleId_Vde].pBaseReg;
+
+ if (s_pCarBaseReg == NULL)
+ {
+ NvRmModuleTable *tbl = NvRmPrivGetModuleTable(pDfs->hRm);
+ s_pCarBaseReg = (tbl->ModInst +
+ tbl->Modules[NvRmPrivModuleID_ClockAndReset].Index)->VirtAddr;
+ }
+ RegValue = NV_CAR_REGR(s_pCarBaseReg, CLK_OUT_ENB_H);
+
+ // If VDE clock is disabled set idle count to maximum
+ if (!(RegValue & CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_VDE_FIELD))
+ {
+ pIdleData->Readings[NvRmDfsClockId_Vpipe] = (NvU32)-1;
+ return;
+ }
+
+ /*
+ * Read VDE vpipe monitor: disable it (=stop, the readings are preserved) and
+ * clear count done status bit (by writing "1"). Then, read VDE idle count (in
+ * v-clock cycles) and store it in idle data packet.
+ */
+ RegValue = NV_DRF_NUM(ARVDE_PPB, IDLE_MON, INT_STATUS, 1);
+ NV_VDE_REGW(pVdeRegs, IDLE_MON, RegValue);
+
+ RegValue = NV_VDE_REGR(pVdeRegs, IDLE_STATUS);
+ pIdleData->Readings[NvRmDfsClockId_Vpipe] =
+ NV_DRF_VAL(ARVDE_PPB, IDLE_STATUS, COUNT, RegValue);
+}
+
+/*****************************************************************************/
+// EMC MODULE INTERFACES
+/*****************************************************************************/
+
+/**
+ * Resets the EMC power-management monitor and derives the readout scale
+ * (power-of-two multiplier) from the detected DRAM type and bus width.
+ * Only the monitor is reset; the rest of EMC is untouched.
+ */
+NvError NvRmPrivAp15EmcMonitorsInit(NvRmDfs* pDfs)
+{
+ NvU32 RegValue;
+ void* pEmcRegs = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+ NV_ASSERT(pEmcRegs);
+
+ /*
+ * EMC power management monitor belongs to EMC module - just reset it,
+ * and do not touch anything else in EMC.
+ */
+ RegValue = NV_EMC_REGR(pEmcRegs, STAT_CONTROL);
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, RST, RegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+
+ /*
+ * EMC active clock cycles = EMC monitor reading * 2^M, where M depends
+ * on DRAM type and bus width. Power M is stored as EMC readouts scale
+ */
+ #define COUNT_SHIFT_SDRAM_X32 (2)
+ #define COUNT_SHIFT_DDR1_X32 (1)
+ RegValue = NV_EMC_REGR(pEmcRegs, FBIO_CFG5);
+ switch (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_TYPE, RegValue))
+ {
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_SDR:
+ pDfs->Modules[NvRmDfsModuleId_Emc].Scale = COUNT_SHIFT_SDRAM_X32;
+ break;
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR1:
+ pDfs->Modules[NvRmDfsModuleId_Emc].Scale = COUNT_SHIFT_DDR1_X32;
+ break;
+ default:
+ // NOTE(review): when NV_ASSERT compiles out (release builds),
+ // Scale is left unmodified here - confirm unsupported DRAM
+ // types cannot reach this path on AP15 hardware.
+ NV_ASSERT(!"Not supported DRAM type");
+ }
+ // A x16 bus transfers half as much per clock as x32, so active cycles
+ // per monitor count double -> bump the scale exponent by one.
+ if (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_WIDTH, RegValue) ==
+ EMC_FBIO_CFG5_0_DRAM_WIDTH_X16)
+ {
+ pDfs->Modules[NvRmDfsModuleId_Emc].Scale++;
+ }
+ return NvSuccess;
+}
+
+void NvRmPrivAp15EmcMonitorsDeinit(NvRmDfs* pDfs)
+{
+ /* Re-initialization resets the EMC power monitor, which is all the
+  * teardown this module needs. */
+ (void)NvRmPrivAp15EmcMonitorsInit(pDfs);
+}
+
+/**
+ * Arms the EMC power monitor for the next DFS sample interval. The limit
+ * is programmed in EMC clock cycles; MEAN_EMC_LIMIT_ERROR centers the
+ * 0-16 cycle hardware rounding error.
+ *
+ * @param pDfs       DFS state (provides EMC base address).
+ * @param pDfsKHz    Current domain frequencies (EMC KHz converts ms to cycles).
+ * @param IntervalMs Next sampling interval in ms.
+ */
+void
+NvRmPrivAp15EmcMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs)
+{
+ NvU32 RegValue, SavedRegValue;
+ void* pEmcRegs = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+
+ // EMC sample period is specified in EMC clock cycles, accuracy 0-16 cycles.
+ #define MEAN_EMC_LIMIT_ERROR (8)
+ NvU32 cycles = IntervalMs * pDfsKHz->Domains[NvRmDfsClockId_Emc] +
+ MEAN_EMC_LIMIT_ERROR;
+ /*
+ * Start EMC power monitor for the next sample period: clear EMC counters,
+ * set sample interval limit in EMC cycles, enable monitoring. Monitor is
+ * counting EMC 1x clock cycles while any memory access is detected.
+ * Both writes below derive from SavedRegValue (the pre-clear control
+ * value) so the CLEAR pulse is not left set when ENABLE is written.
+ */
+ SavedRegValue = NV_EMC_REGR(pEmcRegs, STAT_CONTROL);
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, CLEAR, SavedRegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+
+ RegValue = NV_DRF_NUM(EMC, STAT_PWR_CLOCK_LIMIT, PWR_CLOCK_LIMIT, cycles);
+ NV_EMC_REGW(pEmcRegs, STAT_PWR_CLOCK_LIMIT, RegValue);
+
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, ENABLE, SavedRegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+}
+
+/**
+ * Stops the EMC power monitor and stores the EMC idle count in
+ * @a pIdleData, computed as total cycles minus scaled active cycles.
+ */
+void
+NvRmPrivAp15EmcMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData)
+{
+ NvU32 Ctrl, Total, Busy;
+ NvU32 Shift = pDfs->Modules[NvRmDfsModuleId_Emc].Scale;
+ void* pEmc = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+
+ /* Stop the power monitor; its readings are preserved on disable. */
+ Ctrl = NV_EMC_REGR(pEmc, STAT_CONTROL);
+ Ctrl = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, DISABLE, Ctrl);
+ NV_EMC_REGW(pEmc, STAT_CONTROL, Ctrl);
+
+ /* Total cycles in the sample, and active cycles scaled by 2^Shift
+  * (the exponent was derived from DRAM type and bus width at init). */
+ Total = NV_DRF_VAL(EMC, STAT_PWR_CLOCKS, PWR_CLOCKS,
+                    NV_EMC_REGR(pEmc, STAT_PWR_CLOCKS));
+ Busy = NV_DRF_VAL(EMC, STAT_PWR_COUNT, PWR_COUNT,
+                   NV_EMC_REGR(pEmc, STAT_PWR_COUNT)) << Shift;
+
+ /* Idle = total - active, clamped at zero in case scaling overshoots. */
+ pIdleData->Readings[NvRmDfsClockId_Emc] =
+     (Total > Busy) ? (Total - Busy) : 0;
+}
+
+/*****************************************************************************/
+
+/**
+ * Applies new RAM timing SVOP settings via the APB MISC GP_ASDBGREG
+ * register, preserving all non-SVOP bits (read-modify-write).
+ *
+ * @param hRm         The RM device handle.
+ * @param SvopSetting New SVOP field values; must not set bits outside
+ *                    SVOP_MASK (asserted below).
+ */
+void
+NvRmPrivAp15SetSvopControls(
+ NvRmDeviceHandle hRm,
+ NvU32 SvopSetting)
+{
+#define SVOP_MASK \
+ (NV_DRF_NUM(APB_MISC, GP_ASDBGREG, CFG2TMC_RAM_SVOP_DP, 0xFFFFFFFFUL) | \
+ NV_DRF_NUM(APB_MISC, GP_ASDBGREG, CFG2TMC_RAM_SVOP_PDP, 0xFFFFFFFFUL) | \
+ NV_DRF_NUM(APB_MISC, GP_ASDBGREG, CFG2TMC_RAM_SVOP_REG, 0xFFFFFFFFUL) | \
+ NV_DRF_NUM(APB_MISC, GP_ASDBGREG, CFG2TMC_RAM_SVOP_SP, 0xFFFFFFFFUL))
+
+ NvU32 reg;
+ // APB MISC module virtual base address, resolved lazily on first call.
+ // NOTE(review): unsynchronized static cache - confirm callers are
+ // serialized (same pattern as the CAR cache in VdeMonitorsRead).
+ static void* s_pApbBaseReg = NULL;
+
+ if (s_pApbBaseReg == NULL)
+ {
+ NvRmModuleTable *tbl = NvRmPrivGetModuleTable(hRm);
+ s_pApbBaseReg = (tbl->ModInst +
+ tbl->Modules[NvRmModuleID_Misc].Index)->VirtAddr;
+ }
+ NV_ASSERT((SvopSetting & (~SVOP_MASK)) == 0);
+ reg = NV_APB_REGR(s_pApbBaseReg, GP_ASDBGREG); // RAM timing control
+ reg = (reg & (~SVOP_MASK)) | SvopSetting;
+ NV_APB_REGW(s_pApbBaseReg, GP_ASDBGREG, reg);
+}
+
+/*****************************************************************************/
+
+void* NvRmPrivAp15GetTimerUsVirtAddr(NvRmDeviceHandle hRm)
+{
+ /* Locate the TimerUs module instance, then offset to the free-running
+  * 1us counter register within it. */
+ NvRmModuleTable* pTbl = NvRmPrivGetModuleTable(hRm);
+ NvUPtr base = (NvUPtr)((pTbl->ModInst +
+     pTbl->Modules[NvRmModuleID_TimerUs].Index)->VirtAddr);
+ return (void*)(base + TIMERUS_CNTR_1US_0);
+}
+
+/*****************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.h b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.h
new file mode 100644
index 000000000000..10889666d25c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_dfs.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: NvRM DFS parameters.
+ *
+ */
+
+#ifndef INCLUDED_AP15RM_POWER_DFS_H
+#define INCLUDED_AP15RM_POWER_DFS_H
+
+#include "nvrm_power.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Min KHz for CPU and AVP with regards to JTAG support - 1MHz * 8 = 8MHz
+// TODO: any other limitations on min KHz?
+// TODO: adjust boost parameters based on testing
+
+/**
+ * Default DFS algorithm parameters for CPU domain
+ */
+#define NVRM_DFS_PARAM_CPU_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 10000, /* Minimum domain frequency 10 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 32000, /* Fixed frequency boost increase 32 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 4000, /* Fixed frequency boost increase 4 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 3, /* Relative adjustment of average frequency 1/2^3 ~ 12% */ \
+ 1, /* Number of sample intervals with NRT to trigger boost = 2 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for AVP domain
+ */
+#define NVRM_DFS_PARAM_AVP_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 3, /* Relative adjustment of average frequency 1/2^3 ~ 12% */ \
+ 2, /* Number of sample intervals with NRT to trigger boost = 3 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for System clock domain
+ */
+#define NVRM_DFS_PARAM_SYSTEM_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 5, /* Relative adjustment of average frequency 1/2^5 ~ 3% */ \
+ 2, /* Number of sample intervals with NRT to trigger boost = 3 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for AHB clock domain
+ */
+#define NVRM_DFS_PARAM_AHB_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for APB clock domain
+ */
+#define NVRM_DFS_PARAM_APB_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 15000, /* Minimum domain frequency 15 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for Video-pipe clock domain
+ */
+#define NVRM_DFS_PARAM_VPIPE_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 16000, /* Fixed frequency RT boost increase 16 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 5, /* Relative adjustment of average frequency 1/2^5 ~ 3% */ \
+ 3, /* Number of sample intervals with NRT to trigger boost = 4 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for EMC clock domain
+ */
+#define NVRM_DFS_PARAM_EMC_AP15 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 16000, /* Minimum domain frequency 16 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 16000, /* Fixed frequency RT boost increase 16 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/// Default low corner for core voltage
+#define NVRM_AP15_LOW_CORE_MV (950)
+
+/// Core voltage in suspend
+#define NVRM_AP15_SUSPEND_CORE_MV (1000)
+
+/*****************************************************************************/
+
+/**
+ * Initializes activity monitors within the DFS module. Only activity
+ * monitors are affected. The rest of module's h/w is preserved.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure.
+ */
+NvError NvRmPrivAp15SystatMonitorsInit(NvRmDfs* pDfs);
+NvError NvRmPrivAp15VdeMonitorsInit(NvRmDfs* pDfs);
+NvError NvRmPrivAp15EmcMonitorsInit(NvRmDfs* pDfs);
+
+/**
+ * Deinitializes activity monitors within the DFS module. Only activity
+ * monitors are affected. The rest of module's h/w is preserved.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ */
+void NvRmPrivAp15SystatMonitorsDeinit(NvRmDfs* pDfs);
+void NvRmPrivAp15VdeMonitorsDeinit(NvRmDfs* pDfs);
+void NvRmPrivAp15EmcMonitorsDeinit(NvRmDfs* pDfs);
+
+/**
+ * Starts activity monitors in the DFS module for the next sample interval.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ * @param pDfsKHz - A pointer to current DFS clock frequencies structure.
+ * @param IntervalMs Next sampling interval in ms.
+ */
+void
+NvRmPrivAp15SystatMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs);
+void
+NvRmPrivAp15VdeMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs);
+void
+NvRmPrivAp15EmcMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs);
+
+/**
+ * Reads idle count from activity monitors in the DFS module. The monitors are
+ * stopped.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ * @param pDfsKHz - A pointer to current DFS clock frequencies structure.
+ * @param pIdleData - A pointer to idle cycles structure to be filled in with
+ * data read from the monitor.
+ *
+ */
+void
+NvRmPrivAp15SystatMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+void
+NvRmPrivAp15VdeMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+void
+NvRmPrivAp15EmcMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+
+/**
+ * Changes RAM timing SVOP settings.
+ *
+ * @param hRm The RM device handle.
+ * @param SvopSetting New SVOP setting.
+ */
+void
+NvRmPrivAp15SetSvopControls(
+ NvRmDeviceHandle hRm,
+ NvU32 SvopSetting);
+
+/**
+ * Gets the uS Timer RM virtual address.
+ *
+ * @param hRm The RM device handle.
+ *
+ * @return uS Timer RM virtual address mapped by RM
+ */
+void* NvRmPrivAp15GetTimerUsVirtAddr(NvRmDeviceHandle hRm);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_AP15RM_POWER_DFS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_oalintf.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_oalintf.c
new file mode 100644
index 000000000000..b5518380f259
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_power_oalintf.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Power Resource manager API shared with OS adaptation layer</b>
+ *
+ * @b Description: Implements private HW interface shared by the NvRM Power
+ * manager and OS adaptation layer (OAL).
+ *
+ */
+
+#include "nvrm_power.h"
+#include "nvrm_clocks.h"
+#include "nvrm_module.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "ap15rm_private.h"
+#include "nvrm_structure.h"
+#include "ap15/arapb_misc.h"
+#include "ap15rm_pmc_scratch_map.h"
+#include "common/nvrm_chiplib.h"
+#include "nvassert.h"
+
+/*****************************************************************************/
+
+/*
+ * Macros for power state register field access.
+ * AP15+: a dedicated bits in PMC scratch register 0 are allocated for RM power
+ * state fields.
+ */
+// Statement macro: read-modify-write of one SCRATCH0 field under the PMC
+// scratch mutex. No-op in simulation. do/while(0) so it behaves as a
+// single statement in all contexts.
+#define SET_POWER_FLD_AP15(rm, FieldName, FieldValue) \
+ do \
+ { \
+ if (!NvRmIsSimulation())\
+ {\
+ NvU32 RegValue; \
+ NvU32 RegOffset = APBDEV_PMC_SCRATCH0_0; \
+ NvOsMutexLock(s_hPmcScratchMutex); \
+ RegValue = NV_REGR(rm, NvRmModuleID_Pmif, 0, RegOffset); \
+ RegValue = NV_FLD_SET_DRF_NUM(\
+ APBDEV_PMC, SCRATCH0, FieldName, FieldValue, RegValue); \
+ NV_REGW(rm, NvRmModuleID_Pmif, 0, RegOffset, RegValue); \
+ NvOsMutexUnlock(s_hPmcScratchMutex); \
+ }\
+ } while (0)
+
+// Expression macro: reads SCRATCH0 and extracts the given field value.
+// Deliberately has NO trailing semicolon so it can be used inside larger
+// expressions and unbraced if/else; callers supply their own ';'.
+#define GET_POWER_FLD_AP15(rm, FieldName) \
+ NV_DRF_VAL(APBDEV_PMC, SCRATCH0, FieldName, \
+ (NV_REGR(rm, NvRmModuleID_Pmif, 0, APBDEV_PMC_SCRATCH0_0)))
+
+/*****************************************************************************/
+
+// Mutex for thread-safe access to PMC scratch fields
+static NvOsMutexHandle s_hPmcScratchMutex = NULL;
+
+// Pointer to LP2 Time storage
+static NvUPtr s_pLp2Time = 0;
+
+/**
+ * Initializes the power interface shared between RM and the OAL: resets
+ * the cached LP2-time address, creates the PMC scratch access mutex, and
+ * clears the DFS flags field in the PMC SCRATCH0 register.
+ *
+ * @param hRmDeviceHandle The RM device handle (must be non-NULL).
+ *
+ * @retval NvSuccess Interface initialized.
+ * @retval (other) Mutex creation error; the interface is deinitialized
+ * via NvRmPrivOalIntfDeinit() before the error is returned.
+ */
+NvError NvRmPrivOalIntfInit(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvError e;
+ NV_ASSERT(hRmDeviceHandle);
+
+ // Create PMC scratch register access mutex
+ s_pLp2Time = 0;
+ s_hPmcScratchMutex = NULL;
+ NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_hPmcScratchMutex));
+
+ // Clear DFS flags; other fields initialized by OAL and preserved by RM
+ SET_POWER_FLD_AP15(hRmDeviceHandle, RM_DFS_FLAG, 0);
+ return NvSuccess;
+
+fail:
+ NvRmPrivOalIntfDeinit(hRmDeviceHandle);
+ return e;
+}
+
+/**
+ * Deinitializes the RM/OAL power interface by destroying the PMC scratch
+ * mutex (NvOsMutexDestroy on NULL is assumed benign, as this is also the
+ * init failure path). The handle parameter is unused; it is kept for
+ * symmetry with NvRmPrivOalIntfInit().
+ */
+void NvRmPrivOalIntfDeinit(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvOsMutexDestroy(s_hPmcScratchMutex);
+ s_hPmcScratchMutex = NULL;
+}
+
+/*****************************************************************************/
+
+/*
+ * Write synchronization with the OAL is responsibility of the OAL, i.e., OAL
+ * calls set state function only on entry to LPx state in single-thread
+ * environment
+ */
+/**
+ * Stores the RM power state into the RM_PWR_STATE field of PMC SCRATCH0.
+ * No-op in simulation (handled inside SET_POWER_FLD_AP15).
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param RmState New RM power state value to record.
+ */
+void
+NvRmPrivPowerSetState(NvRmDeviceHandle hRmDeviceHandle, NvRmPowerState RmState)
+{
+ SET_POWER_FLD_AP15(hRmDeviceHandle, RM_PWR_STATE, RmState);
+}
+
+/**
+ * Reads the RM power state back from the RM_PWR_STATE field of PMC
+ * SCRATCH0.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return The stored RM power state; 0 when running in simulation, where
+ * the PMC scratch register is not accessible.
+ */
+NvRmPowerState
+NvRmPrivPowerGetState(NvRmDeviceHandle hRmDeviceHandle)
+{
+ if (NvRmIsSimulation())
+ return (NvRmPowerState)0;
+
+ return GET_POWER_FLD_AP15(hRmDeviceHandle, RM_PWR_STATE);
+}
+
+/*****************************************************************************/
+
+/*
+ * Read synchronization with the OAL is responsibility of the OAL, i.e., OAL
+ * calls get flags function only on entry to LPx state in single-thread
+ * environment
+ */
+/**
+ * Reads the DFS status flags from the RM_DFS_FLAG field of PMC SCRATCH0.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return Combined NvRmDfsStatusFlags bits (0 in simulation). The
+ * StopPllP0 flag is masked out unless StopPllA0 is also set, because
+ * PLLA takes its input from PLLP and PLLP must stay running while
+ * PLLA is in use.
+ */
+NvU32
+NvRmPrivGetDfsFlags(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvU32 Flags = 0;
+ if (!NvRmIsSimulation())
+ {
+ Flags = GET_POWER_FLD_AP15(hRmDeviceHandle, RM_DFS_FLAG);
+ if (!(Flags & NvRmDfsStatusFlags_StopPllA0))
+ Flags &= (~NvRmDfsStatusFlags_StopPllP0); // PLLA input from PLLP
+ }
+ return Flags;
+}
+
+/**
+ * Sets or clears the DFS Pause flag within the RM_DFS_FLAG field of PMC
+ * SCRATCH0 via a read-modify-write under the scratch mutex. No-op in
+ * simulation.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param Pause NV_TRUE to set the Pause flag, NV_FALSE to clear it.
+ */
+void
+NvRmPrivUpdateDfsPauseFlag(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvBool Pause)
+{
+ if (!NvRmIsSimulation())
+ {
+ NvU32 RegValue;
+ NvU32 RegOffset = APBDEV_PMC_SCRATCH0_0;
+ // Position the Pause bit at its location inside the DFS flag field.
+ NvU32 mask = (NvRmDfsStatusFlags_Pause <<
+ NV_FIELD_SHIFT(APBDEV_PMC_SCRATCH0_0_RM_DFS_FLAG_RANGE));
+
+ NvOsMutexLock(s_hPmcScratchMutex);
+
+ RegValue = NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset);
+ if (Pause)
+ RegValue |= mask;
+ else
+ RegValue &= ~mask;
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset, RegValue);
+
+ NvOsMutexUnlock(s_hPmcScratchMutex);
+ }
+}
+
+/**
+ * Updates a PLL reference count and mirrors it into the PLL stop flag in
+ * PMC SCRATCH0: when the count goes 0->1 the stop flag is cleared (PLL
+ * must keep running); when it drops back to 0 the stop flag is set (PLL
+ * may be stopped by the OAL). Compiled out when NV_OAL is set; does
+ * nothing on non-SoC platforms.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pPllRef Pointer to the PLL reference record (count + stop flag).
+ * @param Increment NV_TRUE to add a reference, NV_FALSE to release one.
+ */
+void
+NvRmPrivPllRefUpdate(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPllReference* pPllRef,
+ NvBool Increment)
+{
+#if !NV_OAL
+ NvU32 RegValue, mask;
+ NvU32 RegOffset = APBDEV_PMC_SCRATCH0_0;
+
+ // Do nothing for platforms other, than SoC
+ if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
+ return;
+
+ NV_ASSERT(pPllRef);
+ NV_ASSERT(pPllRef->StopFlag <=
+ NV_FIELD_MASK(APBDEV_PMC_SCRATCH0_0_RM_DFS_FLAG_RANGE));
+ // Shift the per-PLL stop flag into its position within the DFS field.
+ mask = (pPllRef->StopFlag <<
+ NV_FIELD_SHIFT(APBDEV_PMC_SCRATCH0_0_RM_DFS_FLAG_RANGE));
+
+ NvOsMutexLock(s_hPmcScratchMutex);
+
+ if (Increment)
+ {
+ pPllRef->ReferenceCnt++;
+ if (pPllRef->ReferenceCnt == 1)
+ {
+ // First user: clear the stop flag so the PLL is kept running.
+ RegValue = (~mask) &
+ (NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset));
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset, RegValue);
+ }
+ }
+ else
+ {
+ NV_ASSERT(pPllRef->ReferenceCnt);
+ pPllRef->ReferenceCnt--;
+ if (pPllRef->ReferenceCnt == 0)
+ {
+ // Last user gone: set the stop flag so the PLL may be stopped.
+ RegValue = mask |
+ (NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset));
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset, RegValue);
+ }
+ }
+ NvOsMutexUnlock(s_hPmcScratchMutex);
+#endif
+}
+
+/*****************************************************************************/
+
+/*
+ * Write synchronization with the OAL is responsibility of the OAL, i.e., OAL
+ * calls set state function only in OEMInit() single-thread environment
+ */
+/**
+ * Stores the ODM download transport selection in the RM_LOAD_TRANSPORT
+ * field of PMC SCRATCH0 so the OAL can retrieve it later. No-op in
+ * simulation (handled inside SET_POWER_FLD_AP15).
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param Transport Transport selection; must fit in the register field.
+ */
+void
+NvRmPrivSetDownloadTransport(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvOdmDownloadTransport Transport)
+{
+ NV_ASSERT(Transport <=
+ NV_FIELD_MASK(APBDEV_PMC_SCRATCH0_0_RM_LOAD_TRANSPORT_RANGE));
+ SET_POWER_FLD_AP15(hRmDeviceHandle, RM_LOAD_TRANSPORT, Transport);
+}
+
+/**
+ * Reads the download transport selection back from the RM_LOAD_TRANSPORT
+ * field of PMC SCRATCH0.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return The stored transport selection, or NvOdmDownloadTransport_None
+ * when running in simulation.
+ */
+NvOdmDownloadTransport
+NvRmPrivGetDownloadTransport(NvRmDeviceHandle hRmDeviceHandle)
+{
+ if (NvRmIsSimulation())
+ return NvOdmDownloadTransport_None;
+
+ return GET_POWER_FLD_AP15(hRmDeviceHandle, RM_LOAD_TRANSPORT);
+}
+
+/*****************************************************************************/
+
+/**
+ * Pulses the RST_PWR_DET field of PMC SCRATCH0 (writes 1, then 0) to
+ * reset the I/O power-detect cells. Both writes happen under the scratch
+ * mutex so the pulse is not interleaved with other field updates. No-op
+ * in simulation.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ */
+void NvRmPrivAp15IoPowerDetectReset(NvRmDeviceHandle hRmDeviceHandle)
+{
+ if (!NvRmIsSimulation())
+ {
+ NvU32 RegValue;
+ NvU32 RegOffset = APBDEV_PMC_SCRATCH0_0;
+ NvOsMutexLock(s_hPmcScratchMutex);
+
+ RegValue =
+ NV_REGR(hRmDeviceHandle, NvRmModuleID_Pmif, 0, RegOffset);
+ RegValue = NV_FLD_SET_DRF_NUM(
+ APBDEV_PMC, SCRATCH0, RST_PWR_DET, 1, RegValue);
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, RegOffset, RegValue);
+ RegValue = NV_FLD_SET_DRF_NUM(
+ APBDEV_PMC, SCRATCH0, RST_PWR_DET, 0, RegValue);
+ NV_REGW(hRmDeviceHandle,
+ NvRmModuleID_Pmif, 0, RegOffset, RegValue);
+
+ NvOsMutexUnlock(s_hPmcScratchMutex);
+ }
+}
+
+/*****************************************************************************/
+
+/*
+ * PMC scratch register 21 is dedicated as LP2 time storage.
+ * Write synchronization with the OAL is responsibility of the OAL, i.e., OAL
+ * calls access this register only in single-thread environment.
+ */
+/**
+ * Writes the LP2 time (in microseconds) to PMC SCRATCH21. The virtual
+ * address of the register is computed lazily from the Pmif module
+ * instance record and cached in s_pLp2Time for subsequent calls. No-op
+ * in simulation. Synchronization is the OAL's responsibility (see the
+ * comment above).
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param TimeUS LP2 time in microseconds to store.
+ */
+void
+NvRmPrivSetLp2TimeUS(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 TimeUS)
+{
+ if (NvRmIsSimulation())
+ return;
+
+ {
+ if (s_pLp2Time == 0)
+ {
+ // Lazy init: PMC base virtual address + SCRATCH21 offset.
+ NvRmModuleTable* tbl = NvRmPrivGetModuleTable(hRmDeviceHandle);
+ s_pLp2Time = ((NvUPtr)(tbl->ModInst +
+ tbl->Modules[NvRmModuleID_Pmif].Index)->VirtAddr) +
+ APBDEV_PMC_SCRATCH21_0;
+ }
+ NV_WRITE32(s_pLp2Time, TimeUS);
+ }
+}
+
+/**
+ * Reads the LP2 time (in microseconds) from PMC SCRATCH21, lazily
+ * resolving and caching the register's virtual address the same way as
+ * NvRmPrivSetLp2TimeUS().
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return Stored LP2 time in microseconds; 0 in simulation.
+ */
+NvU32
+NvRmPrivGetLp2TimeUS(NvRmDeviceHandle hRmDeviceHandle)
+{
+ if (NvRmIsSimulation())
+ return 0;
+
+ {
+ if (s_pLp2Time == 0)
+ {
+ // Lazy init: PMC base virtual address + SCRATCH21 offset.
+ NvRmModuleTable* tbl = NvRmPrivGetModuleTable(hRmDeviceHandle);
+ s_pLp2Time = ((NvUPtr)(tbl->ModInst +
+ tbl->Modules[NvRmModuleID_Pmif].Index)->VirtAddr) +
+ APBDEV_PMC_SCRATCH21_0;
+ }
+ return NV_READ32(s_pLp2Time);
+ }
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h
new file mode 100644
index 000000000000..e6a1bd130fbc
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_private.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef AP15RM_PRIVATE_H
+#define AP15RM_PRIVATE_H
+
+/*
+ * ap15rm_private.h defines the private implementation functions for the
+ * resource manager.
+ */
+
+#include "nvcommon.h"
+#include "nvrm_structure.h"
+#include "nvrm_power_private.h"
+#include "nvodm_query.h"
+#include "nvos.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Enable this macro to catch spurious interrupts. By default this is disabled
+// as we allow spurious interrupts from GPIO controller.
+#if 0
+#define NVRM_INTR_DECODE_ASSERT(x) NV_ASSERT(x)
+#else
+#define NVRM_INTR_DECODE_ASSERT(x)
+#endif
+
+/**
+ * Find a module given its physical register address
+ *
+ * @param hDevice The RM instance
+ * @param Address Physical base address of the module's registers
+ * @param ModuleId Output parameter to hold the Id of the module (includes
+ * instance).
+ *
+ * @retval NvSuccess The module id was successfully identified.
+ * @retval NvError_NotSupported No module exists at the specified
+ * physical base address.
+ * @retval NvError_BadValue Invalid input parameters.
+ */
+NvError
+NvRmPrivFindModule(NvRmDeviceHandle hDevice, NvU32 Address,
+ NvRmPrivModuleID* ModuleId);
+
+/** Driver init for interrupts.
+ */
+void
+NvRmPrivInterruptTableInit( NvRmDeviceHandle hDevice );
+
+/**
+ * Enable interrupt source for interrupt decoder.
+ */
+/**
+ * Disable interrupt source for interrupt decoder.
+ */
+
+/**
+ * Main controller interrupt enable/disable for sub-controllers.
+ */
+
+/**
+ * Interrupt source enable/disable for AP15 main interrupt controllers.
+ */
+
+/**
+ * Chip unique ID for AP15 and AP16.
+ */
+NvError
+NvRmPrivAp15ChipUniqueId(
+ NvRmDeviceHandle hDevHandle,
+ void* pId);
+
+// Initialize/deinitialize for various RM submodules.
+NvError NvRmPrivDmaInit(NvRmDeviceHandle hDevice);
+void NvRmPrivDmaDeInit(void);
+
+NvError NvRmPrivSpiSlinkInit(NvRmDeviceHandle hDevice);
+void NvRmPrivSpiSlinkDeInit(void);
+
+/**
+ * Retrieves module instance record pointer given module ID
+ *
+ * @param hDevice The RM device handle
+ * @param ModuleId The combined module ID and instance of the target module
+ * @param out Output storage pointer for instance record pointer
+ *
+ * @retval NvSuccess if instance pointer was successfully retrieved
+ * @retval NvError_BadValue if module ID is invalid
+ */
+NvError
+NvRmPrivGetModuleInstance(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvRmModuleInstance **out);
+
+/*
+ * OS specific interrupt initialization
+ */
+void
+NvRmPrivInterruptStart(NvRmDeviceHandle hDevice);
+
+/**
+ * Clear out anything that registered for an interrupt but didn't clean up
+ * after itself.
+ */
+
+void
+NvRmPrivInterruptShutdown(NvRmDeviceHandle hDevice);
+
+/**
+ * Initializes the RM's internal state for tracking the pin-mux register
+ * configurations. This is done by iteratively applying the pre-defined
+ * configurations from ODM Query (see nvodm_query_pinmux.c). This function
+ * applies an "enable" setting when there's a match against the static
+ * declarations (in ODM Query).
+ *
+ * As this function walks the configuration list defined in ODM Query, it does
+ * *not* disable (apply tristate settings to) unused pin-groups for a given I/O
+ * module's configuration. That would be an exercise in futility, since the
+ * current I/O module cannot know if another I/O module is using any unclaimed
+ * pin-groups which the current I/O module configuration might otherwise use.
+ * That system-wide view of pin-group resources is the responsibility of the
+ * System Designer who selects pin-group combinations from the pin-mux
+ * documentation (see //sw/mobile/docs/hw/ap15/pin_mux_configurations.xls).
+ * The selected combination of pin-mux settings (which cannot be in conflict)
+ * are then saved to the configuration tables in ODM Query.
+ *
+ * Further, this initialization routine enables the configuration identified by
+ * the ODM Query tables. Any pre-existing settings are not changed, except as
+ * defined by the static configuration tables in ODM Query. Therefore, the
+ * System Designer *must* also account for pre-existing power-on-reset (POR)
+ * values when determining the valid pin-mux configurations saved in ODM Query.
+ *
+ * Finally, any use of the pin-mux registers prior to RM initialization *must*
+ * be consistent with the ODM Query tables, otherwise the system configuration
+ * is not deterministic (and may violate the definition applied by the System
+ * Designer). Once RM initializes its pin-mux state, any direct access to the
+ * pin-mux registers (ie, not using the RM PinMux API) is strictly prohibited.
+ *
+ * @param hDevice The RM device handle.
+ */
+void
+NvRmPrivInitPinMux(NvRmDeviceHandle hDevice);
+
+/**
+ * Create a mapping from a list of physical sdram pages to the GART. No error
+ * checking is done here, so you can clobber your friend's mappings if you
+ * want. Every map for itself! This function programs registers and cannot
+ * fail. Invalid parameters will result in asserts for debug purposes.
+ *
+ * @see NvRmPrivGartAlloc()
+ *
+ * @param hDevice The RM device handle.
+ * @param pPhysAddrArray Points to an array of physical page addresses. Each
+ * entry represents the base address for a 4KB page of memory. These entries
+ * do not need to be contiguous memory blocks; after all, that's why you're
+ * using this API.
+ * @param NumberOfPages Specifies the number of physical address entries. A
+ * value of 0 has no effect.
+ * @param FirstGartPage Specifies the base address of the first available GART
+ * page. This value should be obtained via a call to NvRmPrivGartAlloc().
+ */
+void
+NvRmPrivCreateGARTMap(
+ NvRmDeviceHandle hDevice,
+ NvU32 *pPhysAddrArray,
+ NvU32 NumberOfPages,
+ NvU32 FirstGartPage);
+
+/**
+ * Suspend GART.
+ */
+void NvRmPrivGartSuspend( NvRmDeviceHandle hDevice );
+
+/**
+ * Resume GART.
+ */
+
+void NvRmPrivGartResume(NvRmDeviceHandle hDevice);
+
+/**
+ * Initializes the clock manager.
+ *
+ * @param hRmDevice The RM device handle
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure
+ */
+NvError
+NvRmPrivClocksInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Deinitializes the clock manager.
+ *
+ * @param hRmDevice The RM device handle
+ */
+void
+NvRmPrivClocksDeinit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Increments a memory handle reference count.
+ */
+void
+NvRmPrivMemIncrRef( NvRmMemHandle hMem );
+
+
+/*** Private Interrupt API's ***/
+
+
+/**
+ * Performs primary interrupt decode for IRQ interrupts in the main
+ * interrupt controllers.
+ *
+ * @param hRmDevice The RM device handle.
+ * @returns The IRQ number of the interrupting device or NVRM_IRQ_INVALID
+ * if no interrupting device was found.
+ */
+
+
+/**
+ * Performs secondary IRQ interrupt decode for interrupting devices
+ * that are interrupt sub-controllers.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Irq Primary IRQ number returned from NvRmInterruptPrimaryDecodeIrq().
+ * @returns The IRQ number of the interrupting device.
+ */
+
+
+
+/**
+ * Performs primary interrupt decode for FIQ interrupts in the main
+ * interrupt controllers.
+ *
+ * @param hRmDevice The RM device handle.
+ * @returns The IRQ number of the interrupting device or NVRM_IRQ_INVALID
+ * if no interrupting device was found.
+ */
+
+
+
+/**
+ * Performs secondary FIQ interrupt decode for interrupting devices
+ * that are interrupt sub-controllers.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Fiq Primary FIQ number returned from NvRmInterruptPrimaryDecodeFiq().
+ * @returns The FIQ number of the interrupting device.
+ */
+
+
+/**
+ * Suspend the dma.
+ */
+NvError NvRmPrivDmaSuspend(void);
+
+/**
+ * Resume the dma.
+ */
+NvError NvRmPrivDmaResume(void);
+
+/**
+ * Check Bond Out to make a module/instance invalid.
+ *
+ * @param hRm The RM device handle
+ */
+void NvRmPrivCheckBondOut( NvRmDeviceHandle hDevice );
+
+/** Returns bond out values and table for AP20 */
+void NvRmPrivAp20GetBondOut( NvRmDeviceHandle hDevice,
+ const NvU32 **pTable, NvU32 *bondOut );
+
+/**
+ * This API should be sparingly used. There is a bug in the chiplib where the
+ * interrupt handler is not passed an argument. So, the handler will call this
+ * function to get the Rm handle.
+ */
+NvRmDeviceHandle NvRmPrivGetRmDeviceHandle( void );
+
+/** Returns the pointer to the relocation table of AP15 chip */
+NvU32 *NvRmPrivAp15GetRelocationTable( NvRmDeviceHandle hDevice );
+
+/** Returns the pointer to the relocation table of AP16 chip */
+NvU32 *NvRmPrivAp16GetRelocationTable( NvRmDeviceHandle hDevice );
+
+/** Returns the pointer to the relocation table of AP20 chip */
+NvU32 *NvRmPrivAp20GetRelocationTable( NvRmDeviceHandle hDevice );
+
+/** Basic reset of AP15 chip modules */
+void NvRmPrivAp15BasicReset( NvRmDeviceHandle hDevice );
+/** Basic reset of AP20 chip modules */
+void NvRmPrivAp20BasicReset( NvRmDeviceHandle hDevice );
+
+/** This API starts the memory controller error monitoring for AP15/AP16. */
+NvError NvRmPrivAp15McErrorMonitorStart( NvRmDeviceHandle hDevice );
+
+/** This API stops the memory controller error monitoring for AP15/AP16. */
+void NvRmPrivAp15McErrorMonitorStop( NvRmDeviceHandle hDevice );
+
+/** This API starts the memory controller error monitoring for AP20. */
+NvError NvRmPrivAp20McErrorMonitorStart( NvRmDeviceHandle hDevice );
+
+/** This API stops the memory controller error monitoring for AP20. */
+void NvRmPrivAp20McErrorMonitorStop( NvRmDeviceHandle hDevice );
+
+/** This API sets up the memory controller for AP15/AP16. */
+void NvRmPrivAp15SetupMc(NvRmDeviceHandle hRm);
+
+/** This API sets up the memory controller for AP20. */
+void NvRmPrivAp20SetupMc(NvRmDeviceHandle hRm);
+
+/* init and deinit the keylist */
+NvError NvRmPrivInitKeyList(NvRmDeviceHandle hRm, const NvU32*, NvU32);
+void NvRmPrivDeInitKeyList(NvRmDeviceHandle hRm);
+
+/**
+ * @brief Query the max interface freq supported by the board for a given
+ * Module.
+ *
+ * This API returns the max interface freq supported by the board based on the
+ * ODM query.
+ */
+NvRmFreqKHz
+NvRmPrivGetInterfaceMaxClock(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // AP15RM_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_reloctable.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_reloctable.c
new file mode 100644
index 000000000000..1854820e6de9
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap15rm_reloctable.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_init.h"
+#include "common/nvrm_hwintf.h"
+#include "ap15/project_relocation_table.h"
+#include "ap15rm_private.h"
+
+// AP15 module relocation table, expanded at compile time from the
+// project relocation header.
+static NvU32 s_RelocationTable[] =
+{
+ NV_RELOCATION_TABLE_INIT
+};
+
+/**
+ * Returns the AP15 relocation table.
+ *
+ * @param hDevice The RM device handle (unused).
+ *
+ * @return Pointer to the first entry of the static relocation table.
+ */
+NvU32 *
+NvRmPrivAp15GetRelocationTable( NvRmDeviceHandle hDevice )
+{
+ return s_RelocationTable;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_pinmux_tables.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_pinmux_tables.c
new file mode 100644
index 000000000000..4d2ebad4583a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_pinmux_tables.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "ap15rm_private.h"
+#include "ap16/arapb_misc.h"
+#include "ap15/arclk_rst.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_clocks.h"
+
+extern const NvU32 g_Ap15MuxI2c1[];
+extern const NvU32 g_Ap15MuxI2c2[];
+extern const NvU32* g_Ap15MuxI2c[];
+
+extern const NvU32 g_Ap15MuxI2c_Pmu[];
+extern const NvU32* g_Ap15MuxI2cPmu[];
+
+extern const NvU32 g_Ap15Mux_Mmc[];
+extern const NvU32* g_Ap15MuxMmc[];
+
+extern const NvU32 g_Ap15MuxSdio2[];
+extern const NvU32 g_Ap15MuxSdio3[];
+extern const NvU32* g_Ap15MuxSdio[];
+
+extern const NvU32 g_Ap15Mux_Spdif[];
+extern const NvU32* g_Ap15MuxSpdif[];
+
+// Pin-mux option list for UART1 (UARTA). Each CONFIGEND()-terminated run
+// is one selectable configuration; the first run is the reset/unconfig
+// entry and MODULEDONE() terminates the list.
+static const NvU32 g_Ap16MuxUart1[] = {
+ // Reset config - abandon IRRX, IRTX & SDD
+ UNCONFIG(C,IRRX,UARTA,RSVD2), UNCONFIG(C,IRTX,UARTA,RSVD2), UNCONFIG(D,SDD,UARTA,PWM), CONFIGEND(),
+ // 8b UAA + UAB pads
+ CONFIG(B,A,UAA,UARTA), CONFIG(B,A,UAB,UARTA), CONFIGEND(),
+ // 4b UAA pads
+ CONFIG(B,A,UAA,UARTA), CONFIGEND(),
+ // 7b GPU pads
+ CONFIG(A,D,GPU,UARTA), CONFIGEND(),
+ // 4b VFIR + UAD pads
+ CONFIG(A,C,IRRX,UARTA), CONFIG(A,C,IRTX,UARTA), CONFIG(B,A,UAD,UARTA), CONFIGEND(),
+ // 2b VFIR pads
+ CONFIG(A,C,IRRX,UARTA), CONFIG(A,C,IRTX,UARTA), CONFIGEND(),
+ // 2b SDIO pads
+ CONFIG(B,D,SDD,UARTA), CONFIGEND(),
+ MODULEDONE()
+};
+// Pin-mux option list for UART2 (IRDA-capable UARTB).
+static const NvU32 g_Ap16MuxUart2[] = {
+ // Reset config - abandon UAD pads; chosen SFLASH pads
+ UNCONFIG(A,UAD,IRDA,SFLASH), CONFIGEND(),
+ // 4b UAD + IRRX + IRTX pads
+ CONFIG(B,A,UAD,IRDA), CONFIG(A,C,IRRX,UARTB), CONFIG(A,C,IRTX,UARTB), CONFIGEND(),
+ // 2b UAD pads
+ CONFIG(B,A,UAD,IRDA), CONFIGEND(),
+ MODULEDONE()
+};
+
+// Pin-mux option list for UART3 (UARTC).
+static const NvU32 g_Ap16MuxUart3[] = {
+ // Reset config - abandon UCA. chosen RSVD1
+ UNCONFIG(B,UCA,UARTC,RSVD1), CONFIGEND(),
+ // 4b UCA + UCB pads
+ CONFIG(B,B,UCA,UARTC), CONFIG(B,B,UCB,UARTC), CONFIGEND(),
+ // 2b UCA pads
+ CONFIG(B,B,UCA,UARTC), CONFIGEND(),
+ MODULEDONE()
+};
+
+// Per-instance UART mux tables, indexed by controller instance and
+// NULL-terminated.
+static const NvU32* g_Ap16MuxUart[] = {
+ &g_Ap16MuxUart1[0],
+ &g_Ap16MuxUart2[0],
+ &g_Ap16MuxUart3[0],
+ NULL
+};
+extern const NvU32 g_Ap15MuxSpi1[];
+extern const NvU32 g_Ap15MuxSpi2[];
+extern const NvU32 g_Ap15MuxSpi3[];
+extern const NvU32* g_Ap15MuxSpi[];
+
+extern const NvU32 g_Ap15Mux_Sflash[];
+extern const NvU32* g_Ap15MuxSflash[];
+
+extern const NvU32 g_Ap15Mux_Twc[];
+extern const NvU32* g_Ap15MuxTwc[];
+
+extern const NvU32 g_Ap15Mux_Ata[];
+extern const NvU32* g_Ap15MuxAta[];
+
+extern const NvU32 g_Ap15Mux_Pwm[];
+extern const NvU32* g_Ap15MuxPwm[];
+
+extern const NvU32 g_Ap15Mux_Hsi[];
+extern const NvU32 *g_Ap15MuxHsi[];
+
+extern const NvU32 g_Ap15Mux_Nand[];
+extern const NvU32* g_Ap15MuxNand[];
+
+extern const NvU32 g_Ap15MuxDap1[];
+extern const NvU32 g_Ap15MuxDap2[];
+extern const NvU32 g_Ap15MuxDap3[];
+extern const NvU32 g_Ap15MuxDap4[];
+extern const NvU32* g_Ap15MuxDap[];
+
+extern const NvU32 g_Ap15Mux_Kbc[];
+extern const NvU32* g_Ap15MuxKbc[];
+
+extern const NvU32 g_Ap15Mux_Hdcp[];
+extern const NvU32* g_Ap15MuxHdcp[];
+
+extern const NvU32 g_Ap15Mux_Hdmi[];
+extern const NvU32* g_Ap15MuxHdmi[];
+
+extern const NvU32 g_Ap15Mux_Mio[];
+extern const NvU32* g_Ap15MuxMio[];
+
+extern const NvU32 g_Ap15Mux_Slink[];
+extern const NvU32* g_Ap15MuxSlink[];
+
+extern const NvU32 g_Ap15Mux_Vi[];
+extern const NvU32* g_Ap15MuxVi[];
+
+extern const NvU32 g_Ap15Mux_Crt[];
+extern const NvU32* g_Ap15MuxCrt[];
+
+extern const NvU32 g_Ap15Mux_Display1[];
+extern const NvU32 g_Ap15Mux_Display2[];
+extern const NvU32* g_Ap15MuxDisplay[];
+
+extern const NvU32 g_Ap15Mux_Cdev1[];
+
+extern const NvU32 g_Ap15Mux_Cdev2[];
+extern const NvU32 g_Ap15Mux_Csus[];
+extern const NvU32* g_Ap15MuxCdev[];
+
+extern const NvU32 g_Ap15Mux_BacklightDisplay1Pwm0[];
+extern const NvU32 g_Ap15Mux_BacklightDisplay1Pwm1[];
+extern const NvU32 g_Ap15Mux_BacklightDisplay2Pwm0[];
+extern const NvU32 g_Ap15Mux_BacklightDisplay2Pwm1[];
+extern const NvU32* g_Ap15MuxBacklight[];
+
+// Pin-mux option list for the ULPI interface (UAA/UAB/UAC pads).
+static const NvU32 g_Ap16Mux_Ulpi[] = {
+ CONFIGEND(), // no pad groups reset to ULPI, so nothing to disown for reset config
+ CONFIG(B,A,UAA,ULPI), CONFIG(B,A,UAB,ULPI), CONFIG(B,A,UAC,ULPI), CONFIGEND(),
+ MODULEDONE()
+};
+// Single-instance ULPI table, NULL-terminated.
+static const NvU32* g_Ap16MuxUlpi[] = {
+ &g_Ap16Mux_Ulpi[0],
+ NULL
+};
+/* Array of all the controller types in the system, pointing to the array of
+ * instances of each controller. Indexed using the NvRmIoModule value.
+ */
+// Master controller table, indexed by NvOdmIoModule value — entry order
+// must exactly track the NvOdmIoModule enum (checked by the NV_CT_ASSERT
+// below). Entries that share pad options with AP15 point at the AP15
+// tables; UART and ULPI use the AP16-specific tables above.
+static const NvU32** g_Ap16MuxControllers[] = {
+ &g_Ap15MuxAta[0],
+ &g_Ap15MuxCrt[0],
+ NULL, // no options for CSI
+ &g_Ap15MuxDap[0],
+ &g_Ap15MuxDisplay[0],
+ NULL, // no options for DSI
+ NULL, // no options for GPIO
+ &g_Ap15MuxHdcp[0],
+ &g_Ap15MuxHdmi[0],
+ &g_Ap15MuxHsi[0],
+ &g_Ap15MuxMmc[0],
+ NULL, // no options for I2S
+ &g_Ap15MuxI2c[0],
+ &g_Ap15MuxI2cPmu[0],
+ &g_Ap15MuxKbc[0],
+ &g_Ap15MuxMio[0],
+ &g_Ap15MuxNand[0],
+ &g_Ap15MuxPwm[0],
+ &g_Ap15MuxSdio[0],
+ &g_Ap15MuxSflash[0],
+ &g_Ap15MuxSlink[0],
+ &g_Ap15MuxSpdif[0],
+ &g_Ap15MuxSpi[0],
+ &g_Ap15MuxTwc[0],
+ NULL, // no options for TVO
+ &g_Ap16MuxUart[0],
+ NULL, // no options for USB
+ NULL, // no options for VDD
+ &g_Ap15MuxVi[0],
+ NULL, // no options for XIO
+ &g_Ap15MuxCdev[0],
+ &g_Ap16MuxUlpi[0],
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, // no options for TSENSor
+ &g_Ap15MuxBacklight[0],
+};
+
+NV_CT_ASSERT(NV_ARRAY_SIZE(g_Ap16MuxControllers)==NvOdmIoModule_Num);
+
+/**
+ * Returns the AP16 master pin-mux configuration table.
+ *
+ * @param hDevice The RM device handle (asserted non-NULL, otherwise
+ * unused).
+ *
+ * @return The controller table indexed by NvOdmIoModule value.
+ */
+const NvU32***
+NvRmAp16GetPinMuxConfigs(NvRmDeviceHandle hDevice)
+{
+ NV_ASSERT(hDevice);
+ return (const NvU32***) g_Ap16MuxControllers;
+}
+
+/**
+ * Maps an RM module ID (module + instance) to the corresponding ODM I/O
+ * module and instance. On AP16, USB OTG instance 0 maps to the USB ODM
+ * module and instance 1 maps to ULPI; everything else is delegated to
+ * the AP15 mapping.
+ *
+ * @param RmModule Combined RM module ID and instance.
+ * @param OdmModule Receives the mapped ODM module.
+ * @param OdmInstance Receives the mapped ODM instance.
+ * @param pCnt Receives the number of mapped entries.
+ *
+ * @return NV_TRUE when the mapping was handled here; otherwise the AP15
+ * implementation's result.
+ */
+NvBool NvRmPrivAp16RmModuleToOdmModule(
+ NvRmModuleID RmModule,
+ NvOdmIoModule *OdmModule,
+ NvU32 *OdmInstance,
+ NvU32 *pCnt)
+{
+ NvRmModuleID Module = NVRM_MODULE_ID_MODULE(RmModule);
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE(RmModule);
+
+ *OdmInstance = Instance;
+
+ switch (Module)
+ {
+ case NvRmModuleID_Usb2Otg:
+ if (Instance == 0)
+ {
+ *OdmModule = NvOdmIoModule_Usb;
+ *OdmInstance = 0;
+ }
+ else
+ {
+ // stop here for instance other than one
+ NV_ASSERT(Instance == 1);
+ *OdmModule = NvOdmIoModule_Ulpi;
+ *OdmInstance = 0;
+ }
+ *pCnt = 1;
+ return NV_TRUE;
+ default:
+ break;
+ }
+
+ return NvRmPrivAp15RmModuleToOdmModule(RmModule,
+ OdmModule, OdmInstance, pCnt);
+}
+
+
+/**
+ * Reports interface capabilities (number of interface lines) for a module
+ * instance under a given ODM pin-map configuration. UART is handled here
+ * for AP16; all other modules fall through to the AP15 implementation.
+ *
+ * @param Module ODM I/O module being queried.
+ * @param Instance Module instance number.
+ * @param PinMap ODM pin-map configuration selector.
+ * @param pCaps For UART, points to an NvRmModuleUartInterfaceCaps that is
+ * filled in; unknown pin maps yield 0 interface lines.
+ *
+ * @retval NvSuccess Capabilities were filled in.
+ * @retval NvError_NotSupported The UART instance is out of range.
+ */
+NvError
+NvRmPrivAp16GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 PinMap,
+ void *pCaps)
+{
+ switch (Module)
+ {
+ case NvOdmIoModule_Uart:
+ {
+ // Cast once instead of repeating it on every assignment.
+ NvRmModuleUartInterfaceCaps *pUartCaps =
+ (NvRmModuleUartInterfaceCaps *)pCaps;
+ if (Instance == 0)
+ {
+ if (PinMap == NvOdmUartPinMap_Config1)
+ pUartCaps->NumberOfInterfaceLines = 8;
+ else if (PinMap == NvOdmUartPinMap_Config3)
+ pUartCaps->NumberOfInterfaceLines = 7;
+ else if ((PinMap == NvOdmUartPinMap_Config2) || (PinMap == NvOdmUartPinMap_Config4))
+ pUartCaps->NumberOfInterfaceLines = 4;
+ else if ((PinMap == NvOdmUartPinMap_Config5) || (PinMap == NvOdmUartPinMap_Config6))
+ pUartCaps->NumberOfInterfaceLines = 2;
+ else
+ pUartCaps->NumberOfInterfaceLines = 0;
+ }
+ else if ((Instance == 1) || (Instance == 2))
+ {
+ // UART2 and UART3 expose identical pin-map capabilities.
+ if (PinMap == NvOdmUartPinMap_Config1)
+ pUartCaps->NumberOfInterfaceLines = 4;
+ else if (PinMap == NvOdmUartPinMap_Config2)
+ pUartCaps->NumberOfInterfaceLines = 2;
+ else
+ pUartCaps->NumberOfInterfaceLines = 0;
+ }
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ return NvError_NotSupported;
+ }
+ return NvSuccess;
+ }
+ default:
+ break;
+ }
+ return NvRmPrivAp15GetModuleInterfaceCaps(Module, Instance, PinMap, pCaps);
+}
+
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_reloctable.c b/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_reloctable.c
new file mode 100644
index 000000000000..99a689921161
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/ap16rm_reloctable.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_init.h"
+#include "common/nvrm_hwintf.h"
+#include "ap16/project_relocation_table.h"
+#include "ap15rm_private.h"
+
+// AP16 relocation table, expanded at compile time from the generated
+// project_relocation_table.h macro.
+static NvU32 s_RelocationTable[] =
+{
+ NV_RELOCATION_TABLE_INIT
+};
+
+// Returns the static AP16 relocation table. hDevice is accepted for
+// interface symmetry with the other per-chip accessors but is unused.
+// Caller must treat the returned table as read-only.
+NvU32 *
+NvRmPrivAp16GetRelocationTable( NvRmDeviceHandle hDevice )
+{
+ return s_RelocationTable;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_clocks.c b/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_clocks.c
new file mode 100644
index 000000000000..92e06db73ce6
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_clocks.c
@@ -0,0 +1,3215 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Clock Resource manager </b>
+ *
+ * @b Description: Implements Clock control API. All code in this file chip
+ * independent. All chip dependent code should move to ap15rm_clocks.c file.
+ */
+
+#include "nvrm_clocks.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "ap15rm_private.h"
+#include "ap15rm_clocks.h"
+#include "ap20/ap20rm_clocks.h"
+#include "nvrm_pmu_private.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvodm_query_pinmux.h"
+#include "nvodm_query_discovery.h"
+
+// Module debug: 0=disable, 1=enable
+#define NVRM_ENABLE_PRINTF (0)
+
+// Debug print macro: compiles away entirely unless both the global debug
+// build flag and the local printf switch are set.
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_POWER_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_POWER_PRINTF(x)
+#endif
+
+// TODO: Replace NvOsWaitUS() with NvRmPrivWaitUS()
+// TODO: CAR access macro
+
+// Actual FPGA clock frequency for all modules is 8.33MHz
+// (display is exception)
+#define FPGA_MODULE_KHZ_AP15 (8330)
+#define FPGA_MODULE_KHZ_AP20 (13000)
+#define FPGA_DISPLAY_KHZ (27000)
+
+// QT clock frequency used as a special value (actual frequency is irrelevant)
+#define QT_MODULE_KHZ (1)
+
+// UART rate divider is part of the UART module and it is not described
+// in the central module clock information table. Hence, need this define.
+#define NVRM_UART_TIMING_DIVISOR_MAX (0xFFFFUL)
+
+/*****************************************************************************/
+
+// Clock source descriptors and frequencies (indexed by NvRmClockSource)
+static NvRmClockSourceInfo* s_ClockSourceTable = NULL;
+static NvU32 s_ClockSourceFreq[NvRmClockSource_Num];
+static NvRmSystemBusComplexInfo s_SystemBusComplex = {0};
+
+// Module clocks frequency limits
+static const NvRmModuleClockLimits* s_ModuleClockLimits;
+
+// Module clocks descriptors and module clock state arrays of the same size;
+// state entry [i] corresponds to descriptor entry [i], so pointer
+// subtraction against s_moduleClockTable is used as the index.
+static const NvRmModuleClockInfo *s_moduleClockTable;
+static NvU32 s_moduleClockTableSize;
+static NvRmModuleClockState *s_moduleClockState = NULL;
+
+// PLL references (per-chip table installed by NvRmPrivClocksInit)
+static NvRmPllReference* s_PllReferencesTable;
+static NvU32 s_PllReferencesTableSize;
+static NvBool s_MipiPllVddOn = NV_FALSE;
+
+// Mutex for thread-safe access to clock control records and h/w
+static NvOsMutexHandle s_hClockMutex = NULL;
+
+// Mutex for thread-safe access to shared PLLs
+static NvOsMutexHandle s_hPllMutex = NULL;
+
+/*****************************************************************************/
+
+// Looks up the clock descriptor and the mutable clock state for ModuleId.
+// On success, *CinfoOut points into the chip's static descriptor table and
+// *StateOut into the parallel s_moduleClockState array (same index).
+// Returns NvError_ModuleNotPresent if the module instance does not exist
+// (or HSMMC on post-AP16 chips, where it is mapped to SDMMC), and
+// NvError_NotSupported if the instance exists but has no clock descriptor.
+NvError
+NvRmPrivGetClockState(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvRmModuleClockInfo** CinfoOut,
+ NvRmModuleClockState** StateOut)
+{
+ NvRmModuleInstance* inst;
+
+ NV_ASSERT( hDevice );
+ NV_ASSERT(s_moduleClockState);
+
+ if (NvRmPrivGetModuleInstance(hDevice, ModuleId, &inst) != NvSuccess)
+ {
+ return NvError_ModuleNotPresent;
+ }
+ if (inst->ModuleData)
+ {
+ // ModuleData was pointed at the clock descriptor during init;
+ // descriptor-to-state mapping is by table index
+ *CinfoOut = (NvRmModuleClockInfo*)inst->ModuleData;
+ *StateOut = &s_moduleClockState[(*CinfoOut) - s_moduleClockTable];
+ return NvSuccess;
+ }
+ else
+ {
+ // Starting with AP20 no dedicated HSMMC clock (mapped to SDMMC)
+ if ((ModuleId == NvRmModuleID_Hsmmc) &&
+ (hDevice->ChipId.Id != 0x15) && (hDevice->ChipId.Id != 0x16))
+ {
+ return NvError_ModuleNotPresent;
+ }
+ NV_ASSERT(!"module clock info missing --"
+ " fillup the [apxx]rm_clocks_info.c file");
+ return NvError_NotSupported;
+ }
+}
+
+/*****************************************************************************/
+
+// Chip-dispatch wrapper for PLLD (MIPI) power rail control; currently all
+// supported chips use the AP15 implementation. pMipiPllVddOn tracks the
+// rail state across calls (see s_MipiPllVddOn).
+static void
+NvRmPrivPllDPowerControl(
+ NvRmDeviceHandle hDevice,
+ NvBool ConfigEntry,
+ NvBool* pMipiPllVddOn)
+{
+ NvRmPrivAp15PllDPowerControl(hDevice, ConfigEntry, pMipiPllVddOn);
+}
+
+// Chip-dispatch wrapper for disabling unreferenced PLLs; currently all
+// supported chips share the AP15 implementation.
+static void
+NvRmPrivDisablePLLs(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state)
+{
+ NvRmPrivAp15DisablePLLs(hDevice, cinfo, state);
+}
+
+// Returns the PLL a module depends on implicitly, i.e. a source the module
+// uses even though it is not selected through its clock-source mux.
+// Returns NvRmClockSource_Invalid when the module has no such dependency.
+static NvRmClockSource
+NvRmPrivGetImplicitPllSource(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module)
+{
+ switch (Module)
+ {
+ // DSI, CSI, I2C and UART modules are implicitly attached to PLLP3
+ // output derived from primary PLLP0.
+ case NvRmModuleID_Dsi:
+ case NvRmModuleID_Csi:
+ case NvRmModuleID_I2c:
+ case NvRmModuleID_Uart:
+ return NvRmClockSource_PllP0;
+
+ // MPE depends on PLLA for audio in AP15/16
+ case NvRmModuleID_Mpe:
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ return NvRmClockSource_PllA0;
+ // fall through
+
+ // No implicit dependencies for other modules
+ default:
+ return NvRmClockSource_Invalid;
+ }
+}
+
+// Updates PLL reference counts when a module clock is about to change
+// enable state: attaches (Enable) or detaches (!Enable) both the module's
+// selected source and its secondary source — either an explicit subclock
+// source (SPDIF/VI/TVO) or an implicit PLL dependency.
+// No-op if the h/w enable bit already matches the requested state.
+// Caller is expected to hold s_hClockMutex.
+static void
+NvRmPrivModuleClockAttach(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state,
+ NvBool Enable)
+{
+ NvU32 i, reg;
+ NvBool Enabled;
+ NvRmClockSource SubSourceId = NvRmClockSource_Invalid;
+ NvRmClockSource SourceId = cinfo->Sources[state->SourceClock];
+
+ if ((cinfo->Module == NvRmModuleID_Spdif) ||
+ (cinfo->Module == NvRmModuleID_Vi) ||
+ (cinfo->Module == NvRmModuleID_Tvo))
+ {
+ // Find secondary source for modules with explicit subclocks; subclock
+ // descriptor and state are located after main ones, respectively
+ SubSourceId = (cinfo + 1)->Sources[(state + 1)->SourceClock];
+ }
+ else
+ {
+ // Find implicit secondary source (if any) for other modules
+ SubSourceId = NvRmPrivGetImplicitPllSource(hDevice, cinfo->Module);
+
+ }
+
+ NV_ASSERT(cinfo->ClkEnableOffset);
+ reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ cinfo->ClkEnableOffset);
+ Enabled = ((reg & cinfo->ClkEnableField) == cinfo->ClkEnableField);
+ if (Enabled == Enable)
+ return; // Exit if no changes in clock status
+
+ for (i = 0; i < s_PllReferencesTableSize; i++)
+ {
+ // If module clock is to be enabled - attach sources (inc ref count)
+ // If module clock is to be disabled - detach sources (dec ref count)
+ if (s_PllReferencesTable[i].SourceId == SourceId)
+ NvRmPrivPllRefUpdate(hDevice, &s_PllReferencesTable[i], Enable);
+ if (s_PllReferencesTable[i].SourceId == SubSourceId)
+ NvRmPrivPllRefUpdate(hDevice, &s_PllReferencesTable[i], Enable);
+ }
+}
+
+// Reconciles PLL attachment records after a module's clock source has been
+// reprogrammed. The per-PLL AttachedModules[] flag is always updated; the
+// PLL reference count is adjusted only if the module clock is currently
+// enabled in hardware (disabled modules hold no PLL references).
+void
+NvRmPrivModuleClockReAttach(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state)
+{
+ NvU32 i, reg;
+ NvRmClockSource SourceId = cinfo->Sources[state->SourceClock];
+
+ for (i = 0; i < s_PllReferencesTableSize; i++)
+ {
+ // Index into AttachedModules[] mirrors the clock descriptor index
+ NvBool* pAttached =
+ &s_PllReferencesTable[i].AttachedModules[cinfo - s_moduleClockTable];
+ NvBool WasAttached = *pAttached;
+ NvBool IsAttached = (s_PllReferencesTable[i].SourceId == SourceId);
+
+ if (WasAttached != IsAttached)
+ {
+ // Changes in source reference always recorded but affect
+ // ref count only when the module clock is enabled
+ if(cinfo->ClkEnableOffset != 0)
+ {
+ reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ cinfo->ClkEnableOffset);
+ if ((reg & cinfo->ClkEnableField) == cinfo->ClkEnableField)
+ {
+ NvRmPrivPllRefUpdate(
+ hDevice, &s_PllReferencesTable[i], IsAttached);
+ }
+ }
+ *pAttached = IsAttached;
+ }
+ }
+}
+
+// Reconciles PLL attachment records for the CPU or System (AVP) core bus
+// after its source changed. CPU/AVP are absent from the relocation table,
+// so their clock-descriptor indices are found by a one-time linear search
+// and cached in function-local statics. Secondary divided PLL outputs are
+// folded into their primary PLL before updating references.
+static void
+NvRmPrivCoreClockReAttach(
+ NvRmDeviceHandle hDevice,
+ NvRmClockSource CoreId,
+ NvRmClockSource SourceId)
+{
+ // Cached descriptor indices; (NvU32)-1 means "not looked up yet".
+ // NOTE(review): not thread-safe on first use — assumed to be initialized
+ // on a single thread during boot; confirm against callers.
+ static NvU32 s_CpuModuleIndex = (NvU32)-1;
+ static NvU32 s_AvpModuleIndex = (NvU32)-1;
+
+ NvU32 i, ModuleIndex;
+
+ // Map core bus clock to processor module. CPU, AVP are not in relocation
+ // table, can not use module instance shortcut - search clock descriptors.
+ if (CoreId == NvRmClockSource_CpuBus)
+ {
+ if (s_CpuModuleIndex == (NvU32)-1)
+ {
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ if (s_moduleClockTable[i].Module == NvRmModuleID_Cpu)
+ break;
+ }
+ s_CpuModuleIndex = i;
+ }
+ NV_ASSERT(s_CpuModuleIndex < s_moduleClockTableSize);
+ ModuleIndex = s_CpuModuleIndex;
+ }
+ else if (CoreId == NvRmClockSource_SystemBus)
+ {
+ if (s_AvpModuleIndex == (NvU32)-1)
+ {
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ if (s_moduleClockTable[i].Module == NvRmModuleID_Avp)
+ break;
+ }
+ s_AvpModuleIndex = i;
+ }
+ NV_ASSERT(s_AvpModuleIndex < s_moduleClockTableSize);
+ ModuleIndex = s_AvpModuleIndex;
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid core id");
+ return;
+ }
+
+ // Map secondary divided PLL outputs to primary PLLs
+ switch (SourceId)
+ {
+ case NvRmClockSource_PllC1:
+ SourceId = NvRmClockSource_PllC0;
+ break;
+ case NvRmClockSource_PllM1:
+ SourceId = NvRmClockSource_PllM0;
+ break;
+ case NvRmClockSource_PllP1:
+ case NvRmClockSource_PllP2:
+ case NvRmClockSource_PllP3:
+ case NvRmClockSource_PllP4:
+ SourceId = NvRmClockSource_PllP0;
+ break;
+ default:
+ break;
+ }
+
+ // Record changes in PLL references and update ref count
+ for (i = 0; i < s_PllReferencesTableSize; i++)
+ {
+ NvBool* pAttached =
+ &s_PllReferencesTable[i].AttachedModules[ModuleIndex];
+ NvBool WasAttached = *pAttached;
+ NvBool IsAttached = (s_PllReferencesTable[i].SourceId == SourceId);
+
+ if (WasAttached != IsAttached)
+ {
+ *pAttached = IsAttached;
+ NvRmPrivPllRefUpdate(hDevice, &s_PllReferencesTable[i], IsAttached);
+ }
+ }
+}
+
+// Reconciles PLL attachment records for memory controller clocks (MC/EMC)
+// after a source change. Unlike module clocks, references are updated
+// unconditionally (no h/w enable-bit check). On AP20 the MC clock tracks
+// the EMC1x domain, so MC is skipped to avoid double-counting.
+void
+NvRmPrivMemoryClockReAttach(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state)
+{
+ NvU32 i;
+ NvRmClockSource SourceId = cinfo->Sources[state->SourceClock];
+
+ // MC clock on AP20 is always the same as EMC1x domain clock - no need for
+ // source reference double-counting.
+ if ((hDevice->ChipId.Id == 0x20) &&
+ (cinfo->Module == NvRmPrivModuleID_MemoryController))
+ return;
+
+ for (i = 0; i < s_PllReferencesTableSize; i++)
+ {
+ NvBool* pAttached =
+ &s_PllReferencesTable[i].AttachedModules[cinfo - s_moduleClockTable];
+ NvBool WasAttached = *pAttached;
+ NvBool IsAttached = (s_PllReferencesTable[i].SourceId == SourceId);
+
+ // Record changes in PLL references and update ref count.
+ // TODO: secondary PLL outputs mapping (only primary PLLs are used now)
+ if (WasAttached != IsAttached)
+ {
+ *pAttached = IsAttached;
+ NvRmPrivPllRefUpdate(hDevice, &s_PllReferencesTable[i], IsAttached);
+ }
+ }
+}
+
+// Attaches (Enable) or detaches (!Enable) an externally-consumed clock
+// source, e.g. a PLL output routed off-chip. Secondary divided PLL outputs
+// are folded into their primary PLL first. The external reference counter
+// and PLL reference update are done under s_hClockMutex; PLLA additionally
+// requires its source to be (re)configured via the I2S path.
+void
+NvRmPrivExternalClockAttach(
+ NvRmDeviceHandle hDevice,
+ NvRmClockSource SourceId,
+ NvBool Enable)
+{
+ NvU32 i;
+
+ // Map secondary divided PLL outputs to primary PLLs
+ switch (SourceId)
+ {
+ case NvRmClockSource_PllC1:
+ SourceId = NvRmClockSource_PllC0;
+ break;
+ case NvRmClockSource_PllM1:
+ SourceId = NvRmClockSource_PllM0;
+ break;
+ case NvRmClockSource_PllP1:
+ case NvRmClockSource_PllP2:
+ case NvRmClockSource_PllP3:
+ case NvRmClockSource_PllP4:
+ SourceId = NvRmClockSource_PllP0;
+ break;
+ default:
+ break;
+ }
+
+ // Attach external clock
+ for (i = 0; i < s_PllReferencesTableSize; i++)
+ {
+ if (s_PllReferencesTable[i].SourceId == SourceId)
+ {
+ // If ext clock is enabled - attach source (inc ref count)
+ // If ext clock is disabled - detach source (dec ref count)
+ NvOsMutexLock(s_hClockMutex);
+ s_PllReferencesTable[i].ExternalClockRefCnt += (Enable ? 1 : (-1));
+ NvRmPrivPllRefUpdate(hDevice, &s_PllReferencesTable[i], Enable);
+
+ // Configure clock source if necessary (required for PLLA)
+ if (SourceId == NvRmClockSource_PllA0)
+ NvRmPrivConfigureClockSource(hDevice, NvRmModuleID_I2s, Enable);
+ NvOsMutexUnlock(s_hClockMutex);
+ }
+ }
+}
+
+/*****************************************************************************/
+
+// Dispatches a module clock enable/disable request to the chip-specific
+// implementation based on chip ID (0x15/0x16 = AP15/AP16, 0x20 = AP20).
+void
+NvRmPrivEnableModuleClock(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState)
+{
+ switch (hRmDevice->ChipId.Id)
+ {
+ case 0x15:
+ case 0x16:
+ Ap15EnableModuleClock(hRmDevice, ModuleId, ClockState);
+ break;
+ case 0x20:
+ Ap20EnableModuleClock(hRmDevice, ModuleId, ClockState);
+ break;
+ default:
+ NV_ASSERT(!"Unsupported chip ID");
+ }
+}
+
+// Public entry point: reference-counted enable/disable of a module clock.
+// Sequence on enable: raise core voltage first (if the module is voltage-
+// scaled), then attach PLL references and gate the clock on; on disable
+// the voltage is released after the clock is gated off. ClientId is
+// currently unused here. Lock order: s_hPllMutex (outer, Vscale modules
+// only) then s_hClockMutex (inner) — must match DVFS lock ordering.
+// Returns NvSuccess, or the error from clock-state lookup.
+NvError NvRmPowerModuleClockControl(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvU32 ClientId,
+ NvBool Enable)
+{
+ NvRmModuleClockInfo *cinfo;
+ NvRmModuleClockState *state;
+ NvRmMilliVolts v;
+ NvError err;
+ ModuleClockState ClockState =
+ Enable ? ModuleClockState_Enable : ModuleClockState_Disable;
+
+ // In diagnostic mode the clock is managed externally - do nothing
+ if (NvRmPrivIsDiagMode(ModuleId))
+ return NvSuccess;
+
+ // Get pointers to module clock info and current module clock state
+ err = NvRmPrivGetClockState(hDevice, ModuleId, &cinfo, &state);
+ if (err != NvSuccess)
+ return err;
+
+ // Check if voltage scaling is required before module clock is enabled.
+ // Core voltage access is shared with DVFS. PMU access transport must
+ // *not* be scalable.
+ if (state->Vscale)
+ {
+ NvOsMutexLock(s_hPllMutex);
+ if (Enable)
+ {
+ // DSI also needs the MIPI PLL rail powered before enabling
+ if (NVRM_MODULE_ID_MODULE(ModuleId) == NvRmModuleID_Dsi)
+ NvRmPrivPllDPowerControl(hDevice, NV_TRUE, &s_MipiPllVddOn);
+ v = NvRmPrivModuleVscaleAttach(hDevice, cinfo, state, NV_TRUE);
+ NvRmPrivDvsRequest(v);
+ }
+ }
+ NvOsMutexLock(s_hClockMutex);
+
+ // Update reference count, and exit if
+ // - clock enable requested and module clock is already enabled
+ // - clock disable requested, but not all enable requests have been matched
+ if (Enable)
+ {
+ if (state->refCount != 0)
+ {
+ state->refCount++;
+ goto leave; // err = NvSuccess already
+ }
+ state->refCount = 1;
+ }
+ else if (state->refCount != 0)
+ {
+ state->refCount --;
+ if (state->refCount != 0)
+ {
+ goto leave; // err = NvSuccess already
+ }
+ }
+ else
+ {
+ // TODO: assert on disable without enable
+ NvOsDebugPrintf(
+ "Clock control balance failed for module %d, instance %d\n",
+ NVRM_MODULE_ID_MODULE(ModuleId), NVRM_MODULE_ID_INSTANCE(ModuleId));
+ // NV_ASSERT(!"Clock control balance violation");
+ }
+ // Ref count crossed 0<->1: actually touch PLL references and h/w gate
+ NvRmPrivModuleClockAttach(hDevice, cinfo, state, Enable);
+ NvRmPrivEnableModuleClock(hDevice, ModuleId, ClockState);
+
+ // Common exit
+leave:
+ NvOsMutexUnlock(s_hClockMutex);
+ if (state->Vscale)
+ {
+ if (!Enable)
+ {
+ if (NVRM_MODULE_ID_MODULE(ModuleId) == NvRmModuleID_Dsi)
+ NvRmPrivPllDPowerControl(hDevice, NV_FALSE, &s_MipiPllVddOn);
+ v = NvRmPrivModuleVscaleAttach(hDevice, cinfo, state, NV_FALSE);
+ NvRmPrivDvsRequest(v);
+ }
+ NvOsMutexUnlock(s_hPllMutex);
+ }
+ return err;
+}
+
+/*****************************************************************************/
+
+// Classifies the execution platform from chip version fields:
+// Major!=0 => real silicon; otherwise simulation if chiplib reports it,
+// FPGA if Minor!=0, else QuickTurn emulation.
+ExecPlatform NvRmPrivGetExecPlatform(NvRmDeviceHandle hRmDeviceHandle)
+{
+ if (hRmDeviceHandle->ChipId.Major != 0)
+ {
+ return ExecPlatform_Soc;
+ }
+ if (NvRmIsSimulation())
+ {
+ return ExecPlatform_Sim;
+ }
+ if (hRmDeviceHandle->ChipId.Minor != 0)
+ {
+ return ExecPlatform_Fpga;
+ }
+ return ExecPlatform_Qt;
+}
+
+/*****************************************************************************/
+
+/* Sets module clock source/divider register */
+void NvRmPrivModuleClockSet(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state)
+{
+ NvU32 reg, divisor;
+
+ NV_ASSERT(cinfo->ClkSourceOffset);
+ reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset);
+ divisor = (reg >> cinfo->DivisorFieldShift) & cinfo->DivisorFieldMask;
+ if ((cinfo->Divider != NvRmClockDivider_None) &&
+ (state->Divider > divisor))
+ {
+ // Switch divider 1st, source 2nd, if new divisor is bigger
+ NV_ASSERT(state->Divider <= cinfo->DivisorFieldMask);
+ reg &= ~(cinfo->DivisorFieldMask << cinfo->DivisorFieldShift);
+ reg |= state->Divider << cinfo->DivisorFieldShift;
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+ }
+
+ NV_ASSERT(state->SourceClock <= cinfo->SourceFieldMask);
+ reg &= (~(cinfo->SourceFieldMask << cinfo->SourceFieldShift));
+ reg |= ( state->SourceClock << cinfo->SourceFieldShift);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+
+ if ((cinfo->Divider != NvRmClockDivider_None) &&
+ (state->Divider < divisor))
+ {
+ // Switch source 1st, divider 2nd, if new divisor is smaller
+ reg &= ~(cinfo->DivisorFieldMask << cinfo->DivisorFieldShift);
+ reg |= state->Divider << cinfo->DivisorFieldShift;
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+ }
+}
+
+// Returns the EMC-synchronized frequency limit for the given module,
+// dispatched to the chip-specific implementation. Falls back to
+// NvRmFreqMaximum (and asserts) on an unknown chip ID.
+static NvRmFreqKHz
+NvRmPrivGetEmcSyncFreq(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID Module)
+{
+ switch (hDevice->ChipId.Id)
+ {
+ case 0x15:
+ case 0x16:
+ return NvRmPrivAp15GetEmcSyncFreq(hDevice, Module);
+ case 0x20:
+ return NvRmPrivAp20GetEmcSyncFreq(hDevice, Module);
+ default:
+ NV_ASSERT(!"Unsupported chip ID");
+ return NvRmFreqMaximum;
+ }
+}
+
+// Delegates exceptional (module-specific) clock selection to the AP15
+// implementation, shared by all supported chips. Returns NV_TRUE when the
+// module was handled there, so generic source search should be skipped.
+static NvBool
+NvRmPrivIsModuleClockException(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleClockInfo *cinfo,
+ NvU32 clockSourceCount,
+ NvU32 MinFreq,
+ NvU32 MaxFreq,
+ const NvRmFreqKHz* PrefFreqList,
+ NvU32 PrefCount,
+ NvRmModuleClockState *state,
+ NvU32 flags)
+{
+ return NvRmPrivAp15IsModuleClockException(
+ hDevice, cinfo, clockSourceCount, MinFreq, MaxFreq,
+ PrefFreqList, PrefCount, state, flags);
+}
+
+/* Returns the best source clock and the best divider */
+static NvError NvRmFindBestClockSource(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleClockInfo *cinfo,
+ NvU32 clockSourceCount,
+ NvU32 MinFreq,
+ NvU32 MaxFreq,
+ const NvRmFreqKHz* PrefFreqList,
+ NvU32 PrefCount,
+ NvRmModuleClockState *state,
+ NvU32 flags)
+{
+ NvU32 bestdiff = 0x7FFFFFFF;
+ NvU32 bestdiv = 0x0;
+ NvU32 SourceClock = (NvU32)-1;
+ NvU32 SourceClockFreq = 0;
+ NvU32 i = 0,j = 0;
+ NvRmFreqKHz freq = 0, ReachedFreq = 0;
+ NvU32 temp = 0, div = 0, mantissa = 0;
+ NvS32 diff = 0;
+
+ NV_ASSERT((MinFreq != 0) && (MinFreq <= MaxFreq));
+
+ // Check if exceptional handling is required this module clock, and exit
+ // if it is completed
+ if (NvRmPrivIsModuleClockException(hDevice, cinfo, clockSourceCount,
+ MinFreq, MaxFreq, PrefFreqList, PrefCount, state, flags))
+ return NvSuccess;
+
+ for (j=0; j< PrefCount; j++) // loop through target frequencies
+ {
+ freq = (PrefFreqList[j] == NvRmFreqMaximum) ? MaxFreq : PrefFreqList[j];
+ if (flags & NvRmClockConfig_QuietOverClock)
+ freq = (PrefFreqList[j] > MaxFreq) ? MaxFreq : PrefFreqList[j];
+ if ((freq < MinFreq) || (freq > MaxFreq))
+ continue;
+
+ for (i=0; i< clockSourceCount; i++) // loop through avilable sources
+ {
+ NV_ASSERT(cinfo->Sources[i] < NvRmClockSource_Num);
+ if (cinfo->Sources[i] == NvRmClockSource_Invalid)
+ break;
+
+ SourceClockFreq = s_ClockSourceFreq[(cinfo->Sources[i])];
+ if (SourceClockFreq < MinFreq)
+ continue;
+ if (NvRmPrivIsSourceProtected(
+ hDevice, cinfo->Module, cinfo->Sources[i]))
+ continue;
+
+ if ((cinfo->Divider == NvRmClockDivider_None) ||
+ (SourceClockFreq <= freq))
+ {
+ div = 1;
+ if (cinfo->Module == NvRmModuleID_Uart)
+ {
+ // If target is not reachable from the source by integer
+ // division - reject the source
+ if (!NvRmIsFreqRangeReachable(SourceClockFreq,
+ MinFreq, MaxFreq, NVRM_UART_TIMING_DIVISOR_MAX))
+ continue;
+ }
+ else if (SourceClockFreq > MaxFreq)
+ continue;
+ }
+ else // Divider, SourceClockFreq > freq
+ {
+ // Default integer divider: Freq = SourceClockFreq / div
+ // where div = h/w divisor field
+ NvU32 MaxDivisor = cinfo->DivisorFieldMask;
+ NV_ASSERT(MaxDivisor);
+
+ if (cinfo->Divider == NvRmClockDivider_Integer_1)
+ {
+ // Integer divider: Freq = SourceClockFreq / div
+ // where div = h/w divisor field + 1
+ MaxDivisor += 1;
+ }
+ else if (cinfo->Divider == NvRmClockDivider_Fractional_2)
+ {
+ // Fractional divider: Freq = (SourceClockFreq * 2) / div
+ // where div = h/w divisor field + 2
+ SourceClockFreq = (SourceClockFreq << 1);
+ MaxDivisor += 2;
+ }
+
+ // Find divisor floor / freq ceiling, and
+ // the 1st bit of the fractional part
+ temp = (SourceClockFreq << 1) / freq;
+ div = temp >> 1;
+ mantissa = temp & 0x01;
+
+ // Check if divisor value fits divisor field
+ if (div >= MaxDivisor)
+ {
+ div = MaxDivisor;
+ if (SourceClockFreq > div * (NvU64)MaxFreq)
+ continue; // max boundary violation at max divisor
+ }
+ else if (SourceClockFreq > div * (NvU64)MaxFreq)
+ {
+ div += 1; // divisor ceiling / freq floor
+ if (SourceClockFreq < div * (NvU64)MinFreq)
+ continue; // both max and min boundaries violation
+ }
+ else if (mantissa)
+ {
+ div += 1; // divisor ceiling / freq floor
+ if (SourceClockFreq < div * (NvU64)MinFreq)
+ div -= 1; // fall back to divisor floor / freq ceiling
+ }
+ }
+ // Check if new traget frequency approximation is the best, so far
+ ReachedFreq = SourceClockFreq / div;
+ diff = freq - ReachedFreq;
+ if (diff < 0)
+ diff *= -1;
+ if ( ((NvU32) diff < bestdiff) ||
+ (((NvU32) diff == bestdiff) && (div < bestdiv)) )
+ {
+ SourceClock = i;
+ bestdiv = div;
+ bestdiff = (NvU32)diff;
+ }
+ }
+ // stop searching if "perfect" match found
+ if (!bestdiff)
+ break;
+ }
+
+ if ((bestdiv == 0) || (SourceClock == (NvU32) -1))
+ {
+ NV_ASSERT(!"No clock source found for this panel");
+ return NvError_NotSupported;
+ }
+
+ // Fill in clock state taking into account different types of dividers
+ state->Divider = bestdiv;
+ state->SourceClock = SourceClock;
+ SourceClockFreq = s_ClockSourceFreq[cinfo->Sources[SourceClock]];
+
+ if (cinfo->Divider == NvRmClockDivider_Integer_1)
+ {
+ state->Divider = bestdiv - 1;
+ }
+ else if (cinfo->Divider == NvRmClockDivider_Fractional_2)
+ {
+ if (bestdiv == 1)
+ bestdiv = 2; // cast pass thru case into generic formula
+ state->Divider = (bestdiv - 2);
+ SourceClockFreq = (SourceClockFreq << 1);
+ }
+
+ state->actual_freq = SourceClockFreq / bestdiv;
+
+ return NvSuccess;
+}
+
+/*****************************************************************************/
+
+// Resets the 2D graphics engine via the chip-specific implementation;
+// called once during clock init after source frequencies are known.
+static void RmReset2D(NvRmDeviceHandle hRmDevice)
+{
+ switch (hRmDevice->ChipId.Id)
+ {
+ case 0x15:
+ case 0x16:
+ NvRmPrivAp15Reset2D(hRmDevice);
+ return;
+ case 0x20:
+ NvRmPrivAp20Reset2D(hRmDevice);
+ return;
+ default:
+ NV_ASSERT(!"Unsupported chip ID");
+ return;
+ }
+}
+
+// Initializes scaled (DVFS-managed) clock configuration; meaningful only
+// on real silicon, so simulation/FPGA/QT environments return immediately.
+// AP15/16 initialize the EMC configuration; AP20 has a broader init.
+static void ScaledClockConfigInit(NvRmDeviceHandle hRmDevice)
+{
+ if (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc)
+ return; // Initialize scaled clock configuration only on SoC
+
+ switch (hRmDevice->ChipId.Id)
+ {
+ case 0x15:
+ case 0x16:
+ NvRmPrivAp15EmcConfigInit(hRmDevice);
+ return;
+ case 0x20:
+ NvRmPrivAp20ScaledClockConfigInit(hRmDevice);
+ return;
+ default:
+ NV_ASSERT(!"Unsupported chip ID");
+ return;
+ }
+}
+
+// Boot-time pass over all module clock descriptors: reads current h/w
+// settings into the module clock state array (via the zero-argument
+// NvRmPowerModuleClockConfig "query" form), sets voltage-scaling
+// attributes, and records PLL attachments — explicit sources for modules,
+// bus-derived sources for CPU/AVP, and implicit PLL dependencies for
+// clocks that are already enabled in hardware.
+static void ModuleClockStateInit(NvRmDeviceHandle hRmDevice)
+{
+ NvError e;
+ NvU32 i, j, flags, reg;
+ NvRmModuleID ModuleId;
+ NvRmClockSource ImplicitPll;
+ const NvRmModuleClockInfo* cinfo;
+ NvRmModuleClockState *state;
+
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ flags = 0;
+ ImplicitPll = NvRmClockSource_Invalid;
+ cinfo = &s_moduleClockTable[i];
+ state = &s_moduleClockState[i];
+ ModuleId = NVRM_MODULE_ID(cinfo->Module, cinfo->Instance);
+
+ if (cinfo->SubClockId)
+ {
+ // Check module subclock configuration
+ if ((cinfo->Module == NvRmModuleID_Spdif) ||
+ (cinfo->Module == NvRmModuleID_Vi) ||
+ (cinfo->Module == NvRmModuleID_Tvo))
+ flags = NvRmClockConfig_SubConfig;
+ }
+ else
+ {
+ // Check implicit attachment to PLLs for main clocks only
+ ImplicitPll = NvRmPrivGetImplicitPllSource(hRmDevice, cinfo->Module);
+ }
+
+ // Fill in module clock state, attach explicit PLL sources for clocks
+ // and subclocks. Special cases: CPU and AVP are not in the relocation
+ // table, and attached to PLL via CPU and System bus, respectively
+ e = NvRmPowerModuleClockConfig(
+ hRmDevice, ModuleId, 0, 0, 0, NULL, 0, NULL, flags);
+ NV_ASSERT((e == NvSuccess) || (e == NvError_ModuleNotPresent));
+ if (e == NvSuccess)
+ {
+ NvRmMilliVolts v; // can be ignored as we always boot at max V
+ NvRmFreqKHz SourceClockFreq =
+ s_ClockSourceFreq[(cinfo->Sources[state->SourceClock])];
+ NvRmPrivModuleSetScalingAttribute(hRmDevice, cinfo, state);
+ v = NvRmPrivModuleVscaleReAttach(
+ hRmDevice, cinfo, state, state->actual_freq, SourceClockFreq);
+ (void)v;
+ }
+ else if ((cinfo->Module == NvRmModuleID_Cpu) ||
+ (cinfo->Module == NvRmModuleID_Avp))
+ {
+ // CPU/AVP: derive PLL attachment from the current core bus source
+ const NvRmCoreClockInfo* pCore =
+ NvRmPrivGetClockSourceHandle(cinfo->Sources[0])->pInfo.pCore;
+ NvRmClockSource SourceId =
+ NvRmPrivCoreClockSourceGet(hRmDevice, pCore);
+ NvRmPrivCoreClockReAttach(hRmDevice, pCore->SourceId, SourceId);
+ }
+
+ // Attach implicit PLL sources
+ if (ImplicitPll != NvRmClockSource_Invalid)
+ {
+ NV_ASSERT(cinfo->ClkEnableOffset);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ cinfo->ClkEnableOffset);
+ if ((reg & cinfo->ClkEnableField) == cinfo->ClkEnableField)
+ {
+ // Only clocks already running hold implicit PLL references
+ for (j = 0; j < s_PllReferencesTableSize; j++)
+ {
+ if (s_PllReferencesTable[j].SourceId == ImplicitPll)
+ NvRmPrivPllRefUpdate(
+ hRmDevice, &s_PllReferencesTable[j], NV_TRUE);
+ }
+ }
+ }
+ }
+}
+
+NvError
+NvRmPrivClocksInit(NvRmDeviceHandle hRmDevice)
+{
+ NvRmModuleID ModuleId;
+ NvU32 i = 0;
+ NvU32 fpgaModuleFreq = 0;
+ ExecPlatform env;
+ NvError e;
+
+ NV_ASSERT(hRmDevice);
+ env = NvRmPrivGetExecPlatform(hRmDevice);
+
+ NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_hClockMutex));
+ NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_hPllMutex));
+
+ /*
+ * Clock tree descriptors and reference tables initialization
+ */
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ {
+ s_moduleClockTable = g_Ap15ModuleClockTable;
+ s_moduleClockTableSize = g_Ap15ModuleClockTableSize;
+ NvRmPrivAp15PllReferenceTableInit(&s_PllReferencesTable,
+ &s_PllReferencesTableSize);
+ s_ClockSourceTable = NvRmPrivAp15ClockSourceTableInit();
+ fpgaModuleFreq = FPGA_MODULE_KHZ_AP15;
+ }
+ else if (hRmDevice->ChipId.Id == 0x20)
+ {
+ s_moduleClockTable = g_Ap20ModuleClockTable;
+ s_moduleClockTableSize = g_Ap20ModuleClockTableSize;
+ NvRmPrivAp20PllReferenceTableInit(&s_PllReferencesTable,
+ &s_PllReferencesTableSize);
+ s_ClockSourceTable = NvRmPrivAp20ClockSourceTableInit();
+ fpgaModuleFreq = FPGA_MODULE_KHZ_AP20;
+ }
+ else
+ NV_ASSERT(!"Unsupported chip ID");
+
+ /*
+ * Allocate module clock state array, and map module clock descriptors
+ * to module instances.
+ */
+ s_moduleClockState = (NvRmModuleClockState *)
+ NvOsAlloc(sizeof (NvRmModuleClockState) * s_moduleClockTableSize);
+ if (s_moduleClockState == NULL)
+ {
+ e = NvError_InsufficientMemory;
+ goto fail;
+ }
+ NvOsMemset(s_moduleClockState, 0,
+ sizeof(NvRmModuleClockState) * s_moduleClockTableSize);
+
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ NvRmModuleInstance* inst;
+ ModuleId = NVRM_MODULE_ID(
+ s_moduleClockTable[i].Module, s_moduleClockTable[i].Instance);
+
+ if (s_moduleClockTable[i].SubClockId)
+ continue; // skip subclocks
+
+ if (NvRmPrivGetModuleInstance(hRmDevice, ModuleId, &inst) == NvSuccess)
+ {
+ inst->ModuleData = (void *)&s_moduleClockTable[i];
+ }
+ else
+ {
+ // NvOsDebugPrintf(
+ // "No module found for clock descriptor with module ID %d\n", ModuleID);
+ }
+ }
+
+ /*
+ * Clock limits and sources initialization
+ */
+ s_ModuleClockLimits = NvRmPrivClockLimitsInit(hRmDevice);
+ s_ClockSourceFreq[NvRmClockSource_Invalid] = 0;
+ s_SystemBusComplex.BusRateOffset = 0;
+ {
+ if (env == ExecPlatform_Fpga)
+ {
+ s_ClockSourceFreq[NvRmClockSource_ClkS] = 32;
+ s_ClockSourceFreq[NvRmClockSource_ClkM] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_ClkD] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_PllA0] = 12288;
+ s_ClockSourceFreq[NvRmClockSource_PllP0] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_PllC0] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_PllM0] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_PllX0] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_CpuBus] = fpgaModuleFreq;
+ s_ClockSourceFreq[NvRmClockSource_SystemBus] = fpgaModuleFreq;
+ NvRmPrivBusClockInit(
+ hRmDevice, s_ClockSourceFreq[NvRmClockSource_SystemBus]);
+ }
+ else if ((env == ExecPlatform_Qt) || (env == ExecPlatform_Sim))
+ {
+ s_ClockSourceFreq[NvRmClockSource_ClkS] = 32;
+ if (env == ExecPlatform_Sim) // On sim set main frequency 13MHz
+ {
+ s_ClockSourceFreq[NvRmClockSource_ClkM] = 13000;
+ s_ClockSourceFreq[NvRmClockSource_ClkD] = 26000;
+ }
+ else // On Qt keep 12MHz
+ {
+ s_ClockSourceFreq[NvRmClockSource_ClkM] = 12000;
+ s_ClockSourceFreq[NvRmClockSource_ClkD] = 24000;
+ }
+ s_ClockSourceFreq[NvRmClockSource_PllA0] = 12288;
+ s_ClockSourceFreq[NvRmClockSource_PllP0] = 432000;
+ s_ClockSourceFreq[NvRmClockSource_PllP1] = 28800;
+ s_ClockSourceFreq[NvRmClockSource_PllP2] = 48000;
+ s_ClockSourceFreq[NvRmClockSource_PllP3] = 72000;
+ s_ClockSourceFreq[NvRmClockSource_PllP4] = 108000;
+ s_ClockSourceFreq[NvRmClockSource_PllC0] = 600000;
+ s_ClockSourceFreq[NvRmClockSource_PllM0] = 333000;
+ s_ClockSourceFreq[NvRmClockSource_SystemBus] = 150000;
+ NvRmPrivAp15SimPllInit(hRmDevice); // Enable plls in simulation
+ NvRmPrivBusClockInit(
+ hRmDevice, s_ClockSourceFreq[NvRmClockSource_SystemBus]);
+ }
+ else if (env == ExecPlatform_Soc)
+ {
+ NvRmPrivClockSourceFreqInit(hRmDevice, s_ClockSourceFreq);
+ }
+ else
+ {
+ NV_ASSERT(!"Not supported execution platform");
+ }
+ RmReset2D(hRmDevice);
+ }
+
+ /*
+ * Initialize current modules clock state
+ */
+ if (env == ExecPlatform_Fpga)
+ {
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ s_moduleClockState[i].actual_freq = fpgaModuleFreq;
+ }
+ }
+ else if (env == ExecPlatform_Qt)
+ {
+ for (i = 0; i < s_moduleClockTableSize; i++)
+ {
+ s_moduleClockState[i].actual_freq = QT_MODULE_KHZ;
+ }
+ }
+ ModuleClockStateInit(hRmDevice);
+ ScaledClockConfigInit(hRmDevice);
+
+ /* debug info... print out some initial frequencies */
+ {
+ NvU32 freq;
+
+ if (NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0))
+ {
+ NvOsDebugPrintf("NVRM CLOCKS: PLLX0: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_PllX0]);
+ }
+ NvOsDebugPrintf("NVRM CLOCKS: PLLM0: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_PllM0]);
+ NvOsDebugPrintf("NVRM CLOCKS: PLLC0: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_PllC0]);
+ NvOsDebugPrintf("NVRM CLOCKS: PLLP0: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_PllP0]);
+ NvOsDebugPrintf("NVRM CLOCKS: PLLA0: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_PllA0]);
+ NvOsDebugPrintf("NVRM CLOCKS: CPU: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_CpuBus]);
+ NvOsDebugPrintf("NVRM CLOCKS: AVP: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_SystemBus]);
+ NvOsDebugPrintf("NVRM CLOCKS: System Bus: %d Khz\n",
+ s_ClockSourceFreq[NvRmClockSource_SystemBus]);
+
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockConfig(
+ hRmDevice, NvRmPrivModuleID_MemoryController,
+ 0, 0, 0, NULL, 0, &freq, 0));
+ NvOsDebugPrintf("NVRM CLOCKS: Memory Controller: %d\n", freq);
+
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockConfig(
+ hRmDevice, NvRmPrivModuleID_ExternalMemoryController,
+ 0, 0, 0, NULL, 0, &freq, 0));
+ NvOsDebugPrintf("NVRM CLOCKS: External Memory Controller: %d\n", freq);
+ }
+
+ return NvSuccess;
+
+fail:
+ NvOsFree(s_moduleClockState);
+ s_moduleClockState = NULL;
+ NvOsMutexDestroy(s_hPllMutex);
+ s_hPllMutex = NULL;
+ NvOsMutexDestroy(s_hClockMutex);
+ s_hClockMutex = NULL;
+ return e;
+}
+
+/**
+ * NvRmPrivClocksDeinit - release clock manager state created during init.
+ *
+ * Frees the module clock state array and destroys the PLL and clock mutexes.
+ * Every freed handle/pointer is cleared afterwards so a repeated deinit call
+ * cannot double-free or destroy a stale mutex.
+ *
+ * @param hRmDevice RM device handle (asserted non-NULL).
+ */
+void
+NvRmPrivClocksDeinit(NvRmDeviceHandle hRmDevice)
+{
+    NV_ASSERT(hRmDevice);
+
+    // TODO: check reference counts for "clock leakage" before releasing state
+    NvOsFree(s_moduleClockState);
+    s_moduleClockState = NULL;
+    NvOsMutexDestroy(s_hPllMutex);
+    s_hPllMutex = NULL;
+    NvOsMutexDestroy(s_hClockMutex);
+    s_hClockMutex = NULL;
+}
+
+/**
+ * NvRmPrivBoostClocks - switch memory, core and graphics clocks to their
+ * "fast" post-boot configuration and print the resulting frequencies.
+ *
+ * Initializes core voltage (DVS) control first, then applies the per-chip
+ * fast clock configuration. Per the comment below, nominal core, CPU and
+ * memory voltages are assumed to have been set already during PMU
+ * initialization — TODO confirm against the RM init sequence of the caller.
+ *
+ * @param hRmDevice RM device handle.
+ */
+void
+NvRmPrivBoostClocks(NvRmDeviceHandle hRmDevice)
+{
+    NvRmFreqKHz FreqKHz;
+
+    // Initialize core voltage control
+    NvRmPrivDvsInit();
+
+    // Configure fast memory and core clocks (nominal core, CPU and memory
+    // voltages are already set by this time during PMU initialization)
+    // Chip IDs 0x15/0x16 take the AP15-class path, 0x20 the AP20 path
+    if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+    {
+        NvRmPrivAp15FastClockConfig(hRmDevice);
+    }
+    else if (hRmDevice->ChipId.Id == 0x20)
+    {
+        NvRmPrivAp20FastClockConfig(hRmDevice);
+    }
+
+    // Print fast clocks. The ClockConfig calls below are queries (no target
+    // frequency list) that return the current frequency in FreqKHz.
+    NvOsDebugPrintf("ADJUSTED CLOCKS:\n");
+    NV_ASSERT_SUCCESS(NvRmPowerModuleClockConfig(
+        hRmDevice, NvRmPrivModuleID_MemoryController,
+        0, 0, 0, NULL, 0, &FreqKHz, 0));
+    NvOsDebugPrintf("MC clock is set to %6d KHz\n", FreqKHz);
+
+    NV_ASSERT_SUCCESS(NvRmPowerModuleClockConfig(
+        hRmDevice, NvRmPrivModuleID_ExternalMemoryController,
+        0, 0, 0, NULL, 0, &FreqKHz, 0));
+    NvOsDebugPrintf("EMC clock is set to %6d KHz (DDR clock is at %6d KHz)\n",
+                    FreqKHz, FreqKHz/2);
+
+    // PLLX exists only on some chips - print it only when present
+    if (NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0))
+    {
+        FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllX0);
+        NvOsDebugPrintf("PLLX0 clock is set to %6d KHz\n", FreqKHz);
+    }
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+    NvOsDebugPrintf("PLLC0 clock is set to %6d KHz\n", FreqKHz);
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_CpuBus);
+    NvOsDebugPrintf("CPU clock is set to %6d KHz\n", FreqKHz);
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_SystemBus);
+    NvOsDebugPrintf("System and AVP clock is set to %6d KHz\n", FreqKHz);
+
+    // Print GPU clocks. The macro pastes the module name into both the
+    // NvRmModuleID_* token and the printed label.
+    #define DEBUG_PRINT_MODULE_CLOCK(Name) \
+    do\
+    {\
+        NV_ASSERT_SUCCESS(NvRmPowerModuleClockConfig( \
+            hRmDevice, NvRmModuleID_##Name, 0, 0, 0, NULL, 0, &FreqKHz, 0)); \
+        NvOsDebugPrintf(#Name " clock is set to %6d KHz\n", FreqKHz); \
+    } while (0)
+
+    DEBUG_PRINT_MODULE_CLOCK(GraphicsHost);
+    DEBUG_PRINT_MODULE_CLOCK(3D);
+    DEBUG_PRINT_MODULE_CLOCK(2D);
+    DEBUG_PRINT_MODULE_CLOCK(Epp);
+    DEBUG_PRINT_MODULE_CLOCK(Mpe);
+    DEBUG_PRINT_MODULE_CLOCK(Vde);
+    #undef DEBUG_PRINT_MODULE_CLOCK
+}
+
+/**
+ * Pairs a PLL (or oscillator) clock source with the ODM GUID of the power
+ * rail that supplies it; used to sync rail state and reference counts at
+ * initialization time.
+ */
+typedef struct NvRmPllRailMapRec
+{
+    // PLL Clock Source Id
+    NvRmClockSource PllId;
+
+    // Power rail GUID
+    NvU64 PllRailId;
+} NvRmPllRailMap;
+
+// Oscillator and PLL clock sources paired with their ODM power rail GUIDs.
+// Iterated during rail initialization; each PllId selects a power-up policy
+// in the switch below (see NvRmPrivPllRailsInit).
+static const NvRmPllRailMap s_PllRailMap[] =
+{
+    { NvRmClockSource_ClkM, NV_VDD_OSC_ODM_ID},
+    { NvRmClockSource_PllA1, NV_VDD_PLLA_ODM_ID},
+    { NvRmClockSource_PllC0, NV_VDD_PLLC_ODM_ID},
+    { NvRmClockSource_PllD0, NV_VDD_PLLD_ODM_ID},
+    { NvRmClockSource_PllM0, NV_VDD_PLLM_ODM_ID},
+    { NvRmClockSource_PllP0, NV_VDD_PLLP_ODM_ID},
+    { NvRmClockSource_PllU0, NV_VDD_PLLU1_ODM_ID},
+    { NvRmClockSource_PllX0, NV_VDD_PLLX_ODM_ID},
+};
+
+/**
+ * NvRmPrivPllRailsInit - bring PMU rail state and reference counts in sync
+ * with the PLLs that are already running when RM initializes.
+ *
+ * Walks s_PllRailMap and applies a per-PLL power-up policy (see the case
+ * comments below): oscillator and boot PLL rails are turned On to sync ref
+ * counts; PLLA/PLLD are handled according to whether the bootloader left
+ * their rails powered; the PLLU rail is owned by the USB stack and is only
+ * touched when the USB download transport is active.
+ *
+ * @param hRmDevice RM device handle.
+ */
+void
+NvRmPrivPllRailsInit(NvRmDeviceHandle hRmDevice)
+{
+    NvU32 i;
+
+    for (i = 0; i < NV_ARRAY_SIZE(s_PllRailMap); i++)
+    {
+        NvU64 PllRailId = s_PllRailMap[i].PllRailId;
+        NvRmClockSource PllId = s_PllRailMap[i].PllId;
+        switch (PllId)
+        {
+            // If present PLLX is treated as other boot PLLs
+            case NvRmClockSource_PllX0:
+                if (!NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0))
+                    break;
+                // fall through
+
+            // Oscillator and boot PLLs are already running - turn the
+            // respective rails On, anyway, to sync ref count
+            case NvRmClockSource_ClkM:
+            case NvRmClockSource_PllC0:
+            case NvRmClockSource_PllM0:
+            case NvRmClockSource_PllP0:
+                NvRmPrivPmuRailControl(hRmDevice, PllRailId, NV_TRUE);
+                break;
+
+            // If PLLA rail is turned On by BL - update ref count, otherwise
+            // turn rail On, but leave PLLA disabled
+            case NvRmClockSource_PllA1:
+                if (NvRmPrivPmuRailGetVoltage(hRmDevice, PllRailId) == 0)
+                {
+                    NvRmPrivAp15PllSet(hRmDevice,
+                        NvRmPrivGetClockSourceHandle(PllId)->pInfo.pPll,
+                        0, 0, 0, (NvU32)-1, 0, 0, NV_TRUE, 0);
+                }
+                NvRmPrivPmuRailControl(hRmDevice, PllRailId, NV_TRUE);
+                break;
+
+            // If PLLD rail is turned On by BL - update ref count, otherwise
+            // keep it Off and disable PLLD; initialize PLLD rail status
+            // (s_MipiPllVddOn mirrors the MIPI PLL rail state for later
+            // display/DSI power control)
+            case NvRmClockSource_PllD0:
+                if (NvRmPrivPmuRailGetVoltage(hRmDevice, PllRailId) != 0)
+                {
+                    s_MipiPllVddOn = NV_TRUE;
+                    NvRmPrivPmuRailControl(hRmDevice, PllRailId, NV_TRUE);
+                }
+                else
+                {
+                    s_MipiPllVddOn = NV_FALSE;
+                    NvRmPrivAp15PllSet(hRmDevice,
+                        NvRmPrivGetClockSourceHandle(PllId)->pInfo.pPll,
+                        0, 0, 0, (NvU32)-1, 0, 0, NV_TRUE, 0);
+                }
+                break;
+
+            // PLLU rail is controlled by USB stack - don't touch it, unless
+            // USB download transport is active. In the latter case update ref
+            // counts for PLLU and USB power rails
+            case NvRmClockSource_PllU0:
+                if (NvRmPrivGetDownloadTransport(hRmDevice) ==
+                    NvOdmDownloadTransport_Usb)
+                {
+                    NvRmPrivPmuRailControl(hRmDevice, PllRailId, NV_TRUE);
+                    NvRmPrivPmuRailControl(hRmDevice, NV_VDD_USB_ODM_ID, NV_TRUE);
+                }
+                break;
+
+            default:
+                NV_ASSERT(!"Invalid Id");
+        }
+    }
+}
+
+/**
+ * NvRmPrivClocksResume - re-sync clock manager state after LP0 suspend.
+ *
+ * Re-reads the h/w clock source frequencies into the s/w shadow table, then
+ * restores the fast clock configuration on AP15-class chips (0x15/0x16).
+ *
+ * @param hRmDevice RM device handle.
+ */
+void
+NvRmPrivClocksResume(NvRmDeviceHandle hRmDevice)
+{
+    // Sync clock sources after LP0
+    NvRmPrivClockSourceFreqInit(hRmDevice, s_ClockSourceFreq);
+
+    switch (hRmDevice->ChipId.Id)
+    {
+        case 0x15:
+        case 0x16:
+            NvRmPrivAp15FastClockConfig(hRmDevice);
+            break;
+        default:
+            // TODO: fast clock configuration on resume for AP20 (0x20)
+            break;
+    }
+}
+
+/*****************************************************************************/
+
+/**
+ * NvRmPrivGetInterfaceMaxClock - query the ODM-imposed maximum clock for a
+ * given RM module.
+ *
+ * Maps the RM module to its ODM module/instance pairs and consults the ODM
+ * clock limit table for each; whenever a table entry covers the instance,
+ * it overrides the returned value. Returns NvRmFreqMaximum when no ODM
+ * limit applies.
+ *
+ * @param hRmDevice RM device handle.
+ * @param ModuleId  RM module (with instance encoded) to look up.
+ * @return Maximum interface clock in kHz, or NvRmFreqMaximum if unlimited.
+ */
+NvRmFreqKHz
+NvRmPrivGetInterfaceMaxClock(NvRmDeviceHandle hRmDevice, NvRmModuleID ModuleId)
+{
+    NvU32 OdmModules[4];
+    NvU32 OdmInstances[4];
+    NvU32* pLimitTable = NULL;
+    NvU32 TableSize = 0;
+    NvU32 Index;
+    NvU32 ModuleCount;
+    NvRmFreqKHz LimitKHz = NvRmFreqMaximum;
+
+    ModuleCount = NvRmPrivRmModuleToOdmModule(hRmDevice->ChipId.Id,
+        ModuleId, (NvOdmIoModule *)OdmModules, OdmInstances);
+
+    for (Index = 0; Index < ModuleCount; Index++)
+    {
+        NvU32 Instance = OdmInstances[Index];
+        NvOdmQueryClockLimits(
+            OdmModules[Index], (const NvU32 **)&pLimitTable, &TableSize);
+        if ((pLimitTable != NULL) && (Instance < TableSize))
+        {
+            LimitKHz = pLimitTable[Instance];
+        }
+    }
+
+    return LimitKHz;
+}
+
+/**
+ * NvRmPrivModuleGetMaxSrcKHz - find the fastest usable source frequency for
+ * a module clock.
+ *
+ * Walks the module's source list (terminated by NvRmClockSource_Invalid),
+ * skipping sources protected for this module, and returns the highest
+ * frequency among the remaining sources (0 if none are usable).
+ *
+ * @param hRmDevice RM device handle.
+ * @param cinfo     Module clock descriptor with the source list.
+ * @return Maximum usable source frequency in kHz.
+ */
+NvRmFreqKHz
+NvRmPrivModuleGetMaxSrcKHz(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmModuleClockInfo* cinfo)
+{
+    NvU32 Index;
+    NvRmFreqKHz BestKHz = 0;
+
+    for (Index = 0; Index < NvRmClockSource_Num; Index++)
+    {
+        NvRmClockSource Source = cinfo->Sources[Index];
+        NV_ASSERT(Source < NvRmClockSource_Num);
+        if (Source == NvRmClockSource_Invalid)
+            break;      // the invalid entry terminates the source list
+        if (NvRmPrivIsSourceProtected(hRmDevice, cinfo->Module, Source))
+            continue;   // protected sources may not be used by this module
+        if (s_ClockSourceFreq[Source] > BestKHz)
+            BestKHz = s_ClockSourceFreq[Source];
+    }
+    return BestKHz;
+}
+
+/**
+ * NvRmPowerModuleClockConfig - configure or query a module clock.
+ *
+ * Two modes of operation:
+ *  - PrefFreqList == NULL: query only. Current source and divider settings
+ *    are read back from h/w into the s/w clock state and the actual
+ *    frequency is returned via CurrentFreq.
+ *  - PrefFreqList != NULL: the best source/divider pair within
+ *    [MinFreq, MaxFreq] is selected and programmed. When voltage scaling
+ *    applies, the core voltage is raised before and tuned after the clock
+ *    change; all PMU calls happen under the PLL mutex, outside the clock
+ *    mutex (see block comment below).
+ *
+ * @param hDevice           RM device handle (asserted non-NULL).
+ * @param ModuleId          Target module (instance encoded).
+ * @param ClientId          Accepted but not referenced by this implementation.
+ * @param MinFreq/MaxFreq   Requested bounds; NvRmFreqUnspecified is clipped
+ *                          to the SoC/ODM limits.
+ * @param PrefFreqList      Preferred target frequencies (NULL = query only).
+ * @param PrefFreqListCount Number of entries in PrefFreqList.
+ * @param CurrentFreq       Optional out: resulting actual frequency in kHz.
+ * @param flags             NvRmClockConfig_* modifiers (subclock, diag lock,
+ *                          special source/divider bits).
+ * @return NvSuccess, or an error from clock state lookup / source search.
+ */
+NvError
+NvRmPowerModuleClockConfig (
+    NvRmDeviceHandle hDevice,
+    NvRmModuleID ModuleId,
+    NvU32 ClientId,
+    NvRmFreqKHz MinFreq,
+    NvRmFreqKHz MaxFreq,
+    const NvRmFreqKHz* PrefFreqList,
+    NvU32 PrefFreqListCount,
+    NvRmFreqKHz* CurrentFreq,
+    NvU32 flags)
+{
+    NvError err = NvSuccess;
+    NvRmModuleClockInfo *cinfo = NULL;
+    NvU32 divisor = 0x0;
+    NvU32 reg = 0x0;
+    NvRmFreqKHz f, SourceClockFreq;
+    ExecPlatform env;
+    NvRmModuleClockState *state;
+    NvRmMilliVolts v = NvRmVoltsOff;
+    NvRmModuleID ModuleName = NVRM_MODULE_ID_MODULE( ModuleId );
+    NvU32 MaxInterfaceClock = 0;
+
+    NvBool DiagMode = NvRmPrivIsDiagMode(ModuleId);
+
+    /* validate the Rm Handle */
+    NV_ASSERT(hDevice);
+    env = NvRmPrivGetExecPlatform(hDevice);
+
+    // Get pointers to module clock info and current module clock state
+    err = NvRmPrivGetClockState(hDevice, ModuleId, &cinfo, &state);
+    if (err != NvSuccess)
+        return err;
+
+    if ((flags & NvRmClockConfig_SubConfig) &&
+        ((ModuleName == NvRmModuleID_Spdif) ||
+         (ModuleName == NvRmModuleID_Vi) ||
+         (ModuleName == NvRmModuleID_Tvo)))
+    {
+        // Module subclock is to be configured. Use subclock descriptor
+        // and subclock state (located immediately after main descriptor,
+        // and state, respectively)
+        state++;
+        cinfo++;
+        NV_ASSERT(cinfo->Module == ModuleName);
+        NV_ASSERT(cinfo->SubClockId == 1);
+    }
+    else if (PrefFreqList && (PrefFreqList[0] == NvRmFreqMaximum) &&
+             ((ModuleName == NvRmModuleID_2D) ||
+              (ModuleName == NvRmModuleID_Epp) ||
+              (ModuleName == NvRmModuleID_GraphicsHost)))
+    {
+        // Maximum frequency for these modules is synchronized with EMC
+        f = NvRmPrivGetEmcSyncFreq(hDevice, ModuleName);
+        if (f == state->actual_freq)
+        {
+            if (CurrentFreq)
+                *CurrentFreq = f;
+            return err; // already in sync
+        }
+        MaxFreq = f + 1; // 1 kHz margin
+    }
+    else if (PrefFreqList &&
+             ((ModuleName == NvRmModuleID_Vde) ||
+              (ModuleName == NvRmPrivModuleID_MemoryController) ||
+              (ModuleName == NvRmPrivModuleID_ExternalMemoryController)))
+    { // CPU, AVP are not allowed too, but failed get state if tried
+        NV_ASSERT(!"MC/EMC, VDE clock configuration is not allowed here");
+        return NvError_NotSupported;
+    }
+
+    // Clip frequency boundaries to h/w limitations
+    if (PrefFreqList)
+    {
+        const NvRmModuleClockLimits* pClimits =
+            NvRmPrivGetSocClockLimits(cinfo->Module);
+        if ((MinFreq == NvRmFreqUnspecified) ||
+            (MinFreq < pClimits->MinKHz))
+        {
+            MinFreq = pClimits->MinKHz;
+        }
+        MaxInterfaceClock = NV_MIN(pClimits->MaxKHz,
+            NvRmPrivGetInterfaceMaxClock(hDevice, ModuleId));
+        if ((MaxFreq == NvRmFreqUnspecified) ||
+            (MaxFreq > MaxInterfaceClock))
+        {
+            MaxFreq = MaxInterfaceClock;
+        }
+    }
+
+#if NVRM_DIAG_LOCK_SUPPORTED
+    // Check/set individual diag lock for this clock only
+    DiagMode |= state->DiagLock;
+    if (flags & NvRmClockConfig_DiagLock)
+        state->DiagLock = NV_TRUE;
+#endif
+
+    /*
+     * Check if voltage scaling is required before module clock is configured.
+     * Core voltage access is shared with DVFS. Display clock configuration
+     * also affects PLLs shared with DVFS and involves PLLD power control. In
+     * any case PMU access transport must *not* be scalable (PMU transport API
+     * must be called outside clock mutex).
+     */
+    if (PrefFreqList && (!DiagMode) &&
+        ((ModuleName == NvRmModuleID_Display) ||
+         (ModuleName == NvRmModuleID_Dsi) || state->Vscale))
+    {
+        NvOsMutexLock(s_hPllMutex);
+
+        // Display configuration always at nominal voltage. UART divider is not
+        // in CAR, and clock state contains source, rather than UART frequency.
+        // Hence, get ready for fastest clock. For other modules use maximum of
+        // target and current frequency. Make sure voltage is high enough for
+        // maximum module source frequency.
+        if ((ModuleName == NvRmModuleID_Display) ||
+            (ModuleName == NvRmModuleID_Dsi))
+        {
+            NvRmPrivPllDPowerControl(hDevice, NV_TRUE, &s_MipiPllVddOn);
+            v = NvRmVoltsMaximum;
+        }
+        else
+        {
+            if (ModuleName == NvRmModuleID_Uart)
+                f = NvRmFreqMaximum;
+            else
+                f = NV_MAX(MaxFreq, state->actual_freq);
+
+            SourceClockFreq = NvRmPrivModuleGetMaxSrcKHz(hDevice, cinfo);
+            v = NvRmPrivModuleVscaleReAttach(
+                hDevice, cinfo, state, f, SourceClockFreq);
+        }
+        NvRmPrivDvsRequest(v);
+    }
+
+    NvOsMutexLock(s_hClockMutex);
+    {
+        if (env == ExecPlatform_Fpga || env == ExecPlatform_Qt)
+        {
+            // Clock configuration only supported for the i2s, VI, i2c,
+            // dvc and HSMMC on this environment
+            if (!(ModuleName == NvRmModuleID_I2s ||
+                  ModuleName == NvRmModuleID_Vi ||
+                  ModuleName == NvRmModuleID_Dvc ||
+                  ModuleName == NvRmModuleID_I2c ||
+                  ModuleName == NvRmModuleID_Hsmmc ||
+                  ModuleName == NvRmModuleID_OneWire
+                ))
+            {
+                // Return actual display clock only on FPGA
+                if ((env == ExecPlatform_Fpga) &&
+                    (ModuleName == NvRmModuleID_Display))
+                {
+                    state->actual_freq = FPGA_DISPLAY_KHZ;
+                }
+
+                goto end;
+            }
+        }
+        if (PrefFreqList && (!DiagMode))
+        {
+            if ((ModuleName != NvRmModuleID_Dsi) &&
+                (ModuleName != NvRmModuleID_Usb2Otg))
+                NV_ASSERT(cinfo->SourceFieldMask || cinfo->DivisorFieldMask);
+
+            // Get the best module source clock and divider
+            err = NvRmFindBestClockSource(hDevice, cinfo, NvRmClockSource_Num,
+                MinFreq, MaxFreq, PrefFreqList, PrefFreqListCount, state, flags);
+            if (err != NvSuccess)
+            {
+                // NOTE(review): skips CurrentFreq update; post-unlock voltage
+                // tuning below still runs with the previous clock state
+                goto leave;
+            }
+            NV_ASSERT(state->SourceClock <= cinfo->SourceFieldMask);
+            if ((ModuleName != NvRmModuleID_Dsi) &&
+                (ModuleName != NvRmModuleID_Usb2Otg))
+            {
+                // Set new clock state
+                NvRmPrivModuleClockSet(hDevice, cinfo, state);
+                if ((ModuleName == NvRmModuleID_Tvo) &&
+                    (cinfo->SubClockId == 1)) // if CVE - sync TVDAC
+                {
+                    NV_ASSERT(((cinfo + 1)->Module == NvRmModuleID_Tvo) &&
+                              ((cinfo + 1)->SubClockId == 2));
+                    *(state + 1) = *state;
+                    NvRmPrivModuleClockSet(hDevice, (cinfo + 1), state);
+                }
+                NvRmPrivModuleClockReAttach(hDevice, cinfo, state);
+                NvRmPrivDisablePLLs(hDevice, cinfo, state);
+            }
+            // FIXME is this a hack just for the AP15 FPGA
+            // Special treatment to the i2s on the fpga to do the workaround
+            // for the i2s recording, the clock source to i2s should be less than
+            // the system clock frequency 8.33MHz for the fpga, so dividing by 2
+            // if its more than
+            if ((hDevice->ChipId.Id == 0x15 || hDevice->ChipId.Id == 0x16) &&
+                (env == ExecPlatform_Fpga) && (ModuleName == NvRmModuleID_I2s))
+            {
+                reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    cinfo->ClkSourceOffset);
+                if (!(reg & 0x7f))
+                {
+                    reg |= 1;
+                    NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                        cinfo->ClkSourceOffset, reg);
+                    state->actual_freq = state->actual_freq/2;
+                }
+            }
+            // Hack: on FPGA OneWire divider is implemented as integer divider
+            // (on SoC it is fractional divider)
+            if ((env == ExecPlatform_Fpga) &&
+                (ModuleName == NvRmModuleID_OneWire))
+            {
+                reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    cinfo->ClkSourceOffset);
+                reg &= ~(cinfo->DivisorFieldMask << cinfo->DivisorFieldShift);
+                reg |= (state->Divider >> 1) << cinfo->DivisorFieldShift;
+                NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    cinfo->ClkSourceOffset, reg);
+            }
+        }
+        else // No target list just update state from h/w and return current frequency
+        {
+            if (cinfo->SourceFieldMask != 0)
+            {
+                NV_ASSERT(cinfo->ClkSourceOffset);
+                state->SourceClock = NV_REGR(
+                    hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset);
+                state->SourceClock >>= cinfo->SourceFieldShift;
+                state->SourceClock &= cinfo->SourceFieldMask;
+                SourceClockFreq = s_ClockSourceFreq[(cinfo->Sources[state->SourceClock])];
+            }
+            else
+            {
+                // If source is Fixed source always at index 0
+                SourceClockFreq = s_ClockSourceFreq[(cinfo->Sources[0])];
+            }
+            // Memory controllers have a dedicated re-attach path
+            if ((ModuleName == NvRmPrivModuleID_MemoryController) ||
+                (ModuleName == NvRmPrivModuleID_ExternalMemoryController))
+                NvRmPrivMemoryClockReAttach(hDevice, cinfo, state);
+            else
+                NvRmPrivModuleClockReAttach(hDevice, cinfo, state);
+
+            if ( cinfo->Divider != NvRmClockDivider_None )
+            {
+                NV_ASSERT(cinfo->ClkSourceOffset);
+                state->Divider = NV_REGR(
+                    hDevice, NvRmPrivModuleID_ClockAndReset, 0, cinfo->ClkSourceOffset);
+                state->Divider >>= cinfo->DivisorFieldShift;
+                state->Divider &= cinfo->DivisorFieldMask;
+
+                // Map the raw divider field to the effective divisor; the
+                // fractional divider works on a doubled source frequency
+                divisor = state->Divider;
+                if (cinfo->Divider == NvRmClockDivider_Integer_1)
+                {
+                    divisor += 1;
+                }
+                else if (cinfo->Divider == NvRmClockDivider_Fractional_2)
+                {
+                    divisor += 2;
+                    SourceClockFreq = (SourceClockFreq << 1);
+                }
+                else if (cinfo->Divider == NvRmClockDivider_Integer_2)
+                {
+                    divisor += 2;
+                }
+            }
+            else
+            {
+                state->Divider = 1;
+                divisor = 1;
+            }
+            NV_ASSERT(divisor);
+            state->actual_freq = SourceClockFreq / divisor;
+        }
+
+        /*
+         * VI and I2S has some special bits in the clock register
+         */
+        NvRmPrivAp15ClockConfigEx(
+            hDevice, ModuleName, cinfo->ClkSourceOffset, flags);
+
+
+        /*
+         * SDMMC internal feedback tap delay adjustment
+         * This is required for the ap20 based boards.
+         */
+        if ((PrefFreqListCount) && (hDevice->ChipId.Id == 0x20) &&
+            (ModuleName == NvRmModuleID_Sdio))
+        {
+            NvRmPrivAp20SdioTapDelayConfigure(hDevice, ModuleId,
+                cinfo->ClkSourceOffset, state->actual_freq);
+        }
+    }
+
+end:
+    if (CurrentFreq)
+    {
+        *CurrentFreq = state->actual_freq;
+    }
+leave:
+    NvOsMutexUnlock(s_hClockMutex);
+    // Must mirror the pre-lock voltage-scaling condition exactly, so the PLL
+    // mutex taken above is always released here
+    if (PrefFreqList && (!DiagMode) &&
+        ((ModuleName == NvRmModuleID_Display) ||
+         (ModuleName == NvRmModuleID_Dsi) || state->Vscale))
+    {
+        // Tune voltage level to the actually configured frequency; for Display
+        // and UART, use maximum requested frequency. Make sure voltage is
+        // updated after display configuration, which may change DVFS clocks.
+        SourceClockFreq =
+            s_ClockSourceFreq[(cinfo->Sources[state->SourceClock])];
+        if (ModuleName == NvRmModuleID_Display)
+        {
+            NvRmPrivPllDPowerControl(hDevice, NV_FALSE, &s_MipiPllVddOn);
+            v = NvRmPrivModuleVscaleReAttach(
+                hDevice, cinfo, state, MaxFreq, SourceClockFreq);
+            v = NvRmVoltsOff; // Guarantees voltage update
+        }
+        else if (ModuleName == NvRmModuleID_Dsi)
+        {
+            NvRmPrivPllDPowerControl(hDevice, NV_FALSE, &s_MipiPllVddOn);
+            f = state->actual_freq;
+            v = NvRmPrivModuleVscaleReAttach(
+                hDevice, cinfo, state, f, SourceClockFreq);
+            v = NvRmVoltsOff; // Guarantees voltage update
+        }
+        else
+        {
+            if (ModuleName == NvRmModuleID_Uart)
+                f = MaxFreq;
+            else
+                f = state->actual_freq;
+            v = NvRmPrivModuleVscaleReAttach(
+                hDevice, cinfo, state, f, SourceClockFreq);
+        }
+        NvRmPrivDvsRequest(v);
+        NvOsMutexUnlock(s_hPllMutex);
+    }
+    return err;
+}
+
+/*****************************************************************************/
+
+/**
+ * NvRmPrivCoreClockSourceGet - decode the currently selected source of a
+ * core (super) clock from its selector register.
+ *
+ * A cleared mode field selects the single fixed 32kHz source. Otherwise the
+ * selected clock mode is 1 + LOG2(mode field), and the source field for that
+ * mode yields an index into the descriptor's source table.
+ *
+ * @param hRmDevice RM device handle (asserted non-NULL).
+ * @param pCinfo    Core clock descriptor (asserted non-NULL).
+ * @return Clock source currently feeding this core clock.
+ */
+NvRmClockSource
+NvRmPrivCoreClockSourceGet(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmCoreClockInfo* pCinfo)
+{
+    NvU32 Selector, Mode, SrcIndex;
+    NvU32 ModeIndex = 0;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pCinfo);
+
+    Selector = NV_REGR(
+        hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->SelectorOffset);
+    Mode = (Selector >> pCinfo->ModeFieldShift) & pCinfo->ModeFieldMask;
+
+    // One fixed 32kHz clock source, if mode field is cleared
+    if (Mode == 0)
+        return NvRmClockSource_ClkS;
+
+    // Selected Clock Mode = 1 + LOG2(mode field) = bit width of mode field
+    while (Mode != 0)
+    {
+        Mode >>= 1;
+        ModeIndex++;
+    }
+    NV_ASSERT(ModeIndex < NvRmCoreClockMode_Num);
+
+    // Source selection index = source field value for currently selected mode
+    SrcIndex = (Selector >> pCinfo->SourceFieldShifts[ModeIndex]) &
+               pCinfo->SourceFieldMasks[ModeIndex];
+    NV_ASSERT(SrcIndex < NvRmClockSource_Num);
+
+    return pCinfo->Sources[SrcIndex];
+}
+
+/**
+ * NvRmPrivCoreClockFreqGet - compute a core (super) clock output frequency
+ * from its source frequency and super-divider register settings.
+ *
+ * The divider is applied only when it is enabled and the dividend is below
+ * the divisor. A divisor equal to the full field mask denotes the special
+ * DFS form: Fout = Fin * (m+1) >> (divisor field size).
+ *
+ * @param hRmDevice RM device handle (asserted non-NULL).
+ * @param pCinfo    Core clock descriptor (asserted non-NULL).
+ * @return Output frequency in kHz.
+ */
+NvRmFreqKHz
+NvRmPrivCoreClockFreqGet(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmCoreClockInfo* pCinfo)
+{
+    NvU32 DividerReg, Dividend, Divisor;
+    NvRmFreqKHz FreqKHz;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pCinfo);
+
+    // Start from the frequency of the currently selected source
+    FreqKHz = s_ClockSourceFreq[NvRmPrivCoreClockSourceGet(hRmDevice, pCinfo)];
+    NV_ASSERT(FreqKHz);
+
+    // Decode super-divider settings and derive the output frequency
+    DividerReg = NV_REGR(
+        hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->DividerOffset);
+    Dividend = (DividerReg >> pCinfo->DividendFieldShift) &
+               pCinfo->DividendFieldMask;
+    Divisor = (DividerReg >> pCinfo->DivisorFieldShift) &
+              pCinfo->DivisorFieldMask;
+
+    if (((DividerReg >> pCinfo->DividerEnableFiledShift) &
+         pCinfo->DividerEnableFiledMask) && (Dividend < Divisor))
+    {
+        if (Divisor == pCinfo->DivisorFieldMask)
+        {
+            // special divisor DFS is using: n+1 = 2^(divisor field size)
+            FreqKHz = (FreqKHz * (Dividend + 1)) >> pCinfo->DivisorFieldSize;
+        }
+        else
+        {
+            // general divisor (may be left by initial configuration)
+            FreqKHz = (FreqKHz * (Dividend + 1)) / (Divisor + 1);
+        }
+    }
+    return FreqKHz;
+}
+
+/**
+ * CoreClockSwitch - program a core (super) clock selector and divider.
+ *
+ * Writes the Idle-mode selector settings for SourceIndex and the raw divider
+ * register value in the order given by SrcFirst, so that core frequency
+ * stays below the maximum of the old and new settings during the switch.
+ * For the CPU bus on AP15-class chips (0x15/0x16), the EMC low-latency path
+ * is configured before and after the switch.
+ *
+ * @param hRmDevice   RM device handle.
+ * @param pCinfo      Core clock descriptor.
+ * @param SourceIndex Index into pCinfo->Sources of the new source.
+ * @param Divider     Raw value written to the divider register.
+ * @param SrcFirst    NV_TRUE to switch the source before the divider.
+ * @param CoreFreq    Resulting core frequency (used for EMC configuration).
+ */
+static void
+CoreClockSwitch(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmCoreClockInfo* pCinfo,
+    NvU32 SourceIndex,
+    NvU32 Divider,
+    NvBool SrcFirst,
+    NvRmFreqKHz CoreFreq)
+{
+    NvU32 reg;
+
+    // Construct core source control register settings.
+    // Always use Idle clock mode; mode field = 2 ^ (Mode - 1)
+    NV_ASSERT(pCinfo->SelectorOffset);
+    NV_ASSERT(SourceIndex <= pCinfo->SourceFieldMasks[NvRmCoreClockMode_Idle]);
+
+    reg = ( ((0x1 << (NvRmCoreClockMode_Idle - 1)) << pCinfo->ModeFieldShift) |
+            (SourceIndex << pCinfo->SourceFieldShifts[NvRmCoreClockMode_Idle]) );
+
+    if (reg != NV_REGR(
+        hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->SelectorOffset))
+    {
+        // Update PLL reference
+        NvRmPrivCoreClockReAttach(
+            hRmDevice, pCinfo->SourceId, pCinfo->Sources[SourceIndex]);
+    }
+
+    // Switch source and divider according to specified order. This guarantees
+    // that core frequency stays below maximum of "old" and "new" settings.
+    // Configure EMC LL path before and after clock switch.
+    if (pCinfo->SourceId == NvRmClockSource_CpuBus)
+    {
+        if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+        {
+            NvRmPrivAp15SetEmcForCpuSrcSwitch(hRmDevice);
+        }
+    }
+    if (SrcFirst)
+    {
+        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                pCinfo->SelectorOffset, reg);
+        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+    }
+    NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+            pCinfo->DividerOffset, Divider);
+    NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+    if (!SrcFirst)
+    {
+        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                pCinfo->SelectorOffset, reg);
+        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+    }
+    if (pCinfo->SourceId == NvRmClockSource_CpuBus)
+    {
+        if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+        {
+            NvRmPrivAp15SetEmcForCpuDivSwitch(hRmDevice, CoreFreq, NV_FALSE);
+        }
+    }
+}
+
+/**
+ * NvRmPrivCoreClockSourceIndexFind - look up the table index of a clock
+ * source in a core clock descriptor.
+ *
+ * On return *pSourceIndex holds the index of SourceId in pCinfo->Sources,
+ * or NvRmClockSource_Num (out of valid range) when the source is not listed.
+ *
+ * @param pCinfo       Core clock descriptor (asserted non-NULL).
+ * @param SourceId     Source to locate.
+ * @param pSourceIndex Out: found index or NvRmClockSource_Num.
+ */
+void
+NvRmPrivCoreClockSourceIndexFind(
+    const NvRmCoreClockInfo* pCinfo,
+    NvRmClockSource SourceId,
+    NvU32* pSourceIndex)
+{
+    NvU32 Index = 0;
+
+    NV_ASSERT(pSourceIndex && pCinfo);
+
+    // Scan the source table; stop at the first match or at the end
+    while ((Index < NvRmClockSource_Num) &&
+           (pCinfo->Sources[Index] != SourceId))
+    {
+        Index++;
+    }
+    // Index == NvRmClockSource_Num here means "not found"
+    *pSourceIndex = Index;
+}
+
+/**
+ * NvRmPrivCoreClockBestSourceFind - choose the best core clock source for a
+ * target frequency under an upper limit.
+ *
+ * Prefers the valid source (non-zero frequency, at or below MaxFreq) with
+ * frequency closest to TargetFreq from above; if no source reaches the
+ * target, the fastest source below it is chosen. On return *pSourceIndex is
+ * the descriptor table index of the chosen source (NvRmClockSource_Num if
+ * none qualifies) and *pSourceFreq its frequency (0 if none qualifies).
+ *
+ * @param pCinfo       Core clock descriptor (asserted non-NULL).
+ * @param MaxFreq      Upper frequency limit for eligible sources.
+ * @param TargetFreq   Requested frequency.
+ * @param pSourceFreq  Out: frequency of the chosen source.
+ * @param pSourceIndex Out: table index of the chosen source.
+ */
+void
+NvRmPrivCoreClockBestSourceFind(
+    const NvRmCoreClockInfo* pCinfo,
+    NvRmFreqKHz MaxFreq,
+    NvRmFreqKHz TargetFreq,
+    NvRmFreqKHz* pSourceFreq,
+    NvU32* pSourceIndex)
+{
+    NvU32 Index;
+    NvRmFreqKHz BestKHz = 0;
+    NvU32 BestIndex = NvRmClockSource_Num; // out of valid range = not found
+
+    NV_ASSERT(pSourceFreq && pSourceIndex && pCinfo);
+
+    for (Index = 0; Index < NvRmClockSource_Num; Index++)
+    {
+        NvRmFreqKHz SrcKHz = s_ClockSourceFreq[pCinfo->Sources[Index]];
+        if ((SrcKHz == 0) || (SrcKHz > MaxFreq))
+            continue;   // skip invalid sources and those above the limit
+
+        // Take a faster source while still short of the target, or a slower
+        // source that still meets the target (closing in from above)
+        if (((BestKHz < SrcKHz) && (BestKHz < TargetFreq)) ||
+            ((BestKHz >= SrcKHz) && (SrcKHz >= TargetFreq)))
+        {
+            BestIndex = Index;
+            BestKHz = SrcKHz;
+        }
+    }
+    *pSourceIndex = BestIndex;
+    *pSourceFreq = BestKHz;
+}
+
+/**
+ * NvRmPrivCoreClockConfigure - configure a core (super) clock to a target
+ * frequency, selecting source and super-divider settings.
+ *
+ * If *pSourceId is NvRmClockSource_Invalid the best source at or below
+ * MaxFreq is chosen automatically; otherwise the requested source is used
+ * (still subject to MaxFreq). The super divider is engaged only when it can
+ * reduce the source frequency toward the target; the switch order is chosen
+ * so the core never overshoots both old and new settings.
+ *
+ * @param hRmDevice RM device handle (asserted non-NULL).
+ * @param pCinfo    Core clock descriptor (asserted non-NULL).
+ * @param MaxFreq   Maximum allowed source frequency.
+ * @param pFreq     In: target frequency (0 is treated as 1 kHz => minimum
+ *                  configurable). Out: actually achieved frequency.
+ * @param pSourceId In: requested source or NvRmClockSource_Invalid.
+ *                  Out: the source actually selected.
+ * @return NvSuccess, or NvError_NotSupported if no suitable source exists.
+ */
+NvError
+NvRmPrivCoreClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmCoreClockInfo* pCinfo,
+    NvRmFreqKHz MaxFreq,
+    NvRmFreqKHz* pFreq,
+    NvRmClockSource* pSourceId)
+{
+    NvU32 m, n, reg;
+    NvBool SrcFirst;
+    NvRmFreqKHz ClkFreq;
+    NvRmFreqKHz SrcFreq = 0;
+    NvU32 SrcIndex = NvRmClockSource_Num; // source index out of valid range
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pFreq && pSourceId && pCinfo);
+    NV_ASSERT(*pSourceId < NvRmClockSource_Num);
+
+    // 0 kHz is not achievable, anyway; changing target to 1 kHz will result in
+    // minimum configurable frequency
+    ClkFreq = *pFreq;
+    if (ClkFreq == 0)
+        ClkFreq = 1;
+
+    /*
+     * If no valid source explicitly specified by the caller, determine the
+     * best clock source for the requested frequency. Otherwise, just use the
+     * requested source.
+     */
+    if (*pSourceId == NvRmClockSource_Invalid)
+    {
+        NvRmPrivCoreClockBestSourceFind(
+            pCinfo, MaxFreq, ClkFreq, &SrcFreq, &SrcIndex);
+    }
+    else
+    {
+        SrcFreq = s_ClockSourceFreq[*pSourceId];
+        if (SrcFreq <= MaxFreq)
+        {
+            NvRmPrivCoreClockSourceIndexFind(pCinfo, *pSourceId, &SrcIndex);
+        }
+    }
+    if (SrcIndex >= NvRmClockSource_Num)
+    {
+        // Could not find source
+        return NvError_NotSupported;
+    }
+    NV_ASSERT(SrcFreq);
+
+    /*
+     * Determine super divider settings and enable divider if necessary. Always
+     * use maximum possible divisor n = divisor mask, so n+1 = 2^(divisor size).
+     * Hence, Fout = Fin * (m+1) / (n+1) = (Fin * (m+1)) >> (divisor size), and
+     * respectively, m = ((Fout << (divisor size)) / Fin) - do not subtract 1
+     * as integer division would round down, anyway. Determine switching order:
+     * switch source 1st if new divider quotient is bigger than the old one.
+     */
+    n = pCinfo->DivisorFieldMask;
+    // NOTE(review): dividing by (SrcFreq + 1) rather than SrcFreq biases m
+    // down slightly - presumably to keep the output at/below target; verify
+    m = (ClkFreq << pCinfo->DivisorFieldSize) / (SrcFreq + 1);
+    if ((m < n) && (m <= pCinfo->DividendFieldMask))
+    {
+        NvU32 m_old, n_old;
+        SrcFirst = NV_FALSE;
+        reg = NV_REGR(
+            hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->DividerOffset);
+        if ( ((reg >> pCinfo->DividerEnableFiledShift) &
+              pCinfo->DividerEnableFiledMask) == pCinfo->DividerEnableFiledMask )
+        {
+            m_old = (reg >> pCinfo->DividendFieldShift) & pCinfo->DividendFieldMask;
+            n_old = (reg >> pCinfo->DivisorFieldShift) & pCinfo->DivisorFieldMask;
+            // Compare quotients (m+1)/(n+1) vs (m_old+1)/(n_old+1) without
+            // division: switch source first if the new quotient is bigger
+            if ( ((m + 1) * (n_old + 1)) > ((n + 1) * (m_old + 1)) )
+                SrcFirst = NV_TRUE;
+        }
+        reg = (pCinfo->DividerEnableFiledMask << pCinfo->DividerEnableFiledShift) |
+              (m << pCinfo->DividendFieldShift) | (n << pCinfo->DivisorFieldShift);
+        // return actual clock frequency from the divider
+        *pFreq = (SrcFreq * (m + 1)) >> pCinfo->DivisorFieldSize;
+    }
+    else
+    {
+        SrcFirst = NV_TRUE;
+        reg = 0; // clear = disable divider
+        // return actual clock frequency from the source directly
+        *pFreq = SrcFreq;
+    }
+    // Finally set new core clock
+    CoreClockSwitch(hRmDevice, pCinfo, SrcIndex, reg, SrcFirst, *pFreq);
+
+    // return selected source id and update core bus frequency
+    *pSourceId = pCinfo->Sources[SrcIndex];
+    s_ClockSourceFreq[pCinfo->SourceId] = *pFreq;
+    // CPU bridge divider output tracks the CPU bus - refresh its shadow too
+    if ((pCinfo->SourceId == NvRmClockSource_CpuBus) &&
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge))
+    {
+        s_ClockSourceFreq[NvRmClockSource_CpuBridge] = NvRmPrivDividerFreqGet(
+            hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge)->pInfo.pDivider);
+    }
+    return NvSuccess;
+}
+
+/**
+ * NvRmPrivCoreClockSet - set a core (super) clock to an explicit source and
+ * m/n super-divider setting.
+ *
+ * Unlike NvRmPrivCoreClockConfigure, the caller supplies the source and the
+ * raw dividend/divisor; out-of-range MSBs are simply masked off. The divider
+ * is engaged only when m < n (ratio below 1), otherwise it is disabled.
+ * No-op on FPGA. The s/w shadow frequency for the core bus (and the CPU
+ * bridge, when applicable) is updated at the end.
+ *
+ * @param hRmDevice RM device handle (asserted non-NULL).
+ * @param pCinfo    Core clock descriptor (asserted non-NULL).
+ * @param SourceId  Source to select (must be in the descriptor's table).
+ * @param m         Divider dividend (clipped to the field mask).
+ * @param n         Divider divisor (clipped to the field mask).
+ */
+void
+NvRmPrivCoreClockSet(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmCoreClockInfo* pCinfo,
+    NvRmClockSource SourceId,
+    NvU32 m,
+    NvU32 n)
+{
+    NvU32 reg;
+    NvBool SrcFirst;
+    NvRmFreqKHz CoreFreq = 0;
+    NvU32 SrcIndex = NvRmClockSource_Num; // source index out of valid range
+    ExecPlatform env;
+
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pCinfo);
+
+    env = NvRmPrivGetExecPlatform(hRmDevice);
+
+    // Core clocks are not programmable on FPGA
+    if (env == ExecPlatform_Fpga)
+        return;
+
+    NvRmPrivCoreClockSourceIndexFind(pCinfo, SourceId, &SrcIndex);
+    NV_ASSERT(SrcIndex < NvRmClockSource_Num);
+
+    /*
+     * Set divide: just cut off MSbits out of dividend and divisor range, and
+     * enable divider if m/n ration is below 1. Update new core frequency.
+     * Determine switching order: switch source 1st if new divider quotient is
+     * bigger than the old one.
+     */
+    m &= pCinfo->DividendFieldMask;
+    n &= pCinfo->DivisorFieldMask;
+    CoreFreq = s_ClockSourceFreq[SourceId];
+    if (m < n)
+    {
+        NvU32 m_old, n_old;
+        SrcFirst = NV_FALSE;
+        reg = NV_REGR(
+            hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->DividerOffset);
+        if ( ((reg >> pCinfo->DividerEnableFiledShift) &
+              pCinfo->DividerEnableFiledMask) == pCinfo->DividerEnableFiledMask )
+        {
+            m_old = (reg >> pCinfo->DividendFieldShift) & pCinfo->DividendFieldMask;
+            n_old = (reg >> pCinfo->DivisorFieldShift) & pCinfo->DivisorFieldMask;
+            // Cross-multiplied quotient comparison (avoids division)
+            if ( ((m + 1) * (n_old + 1)) > ((n + 1) * (m_old + 1)) )
+                SrcFirst = NV_TRUE;
+        }
+        reg = (pCinfo->DividerEnableFiledMask << pCinfo->DividerEnableFiledShift) |
+              (m << pCinfo->DividendFieldShift) | (n << pCinfo->DivisorFieldShift);
+        CoreFreq = (CoreFreq * (m + 1)) / (n + 1);
+    }
+    else
+    {
+        SrcFirst = NV_TRUE;
+        reg = 0; // clear = disable divider
+    }
+    // Finally set new core clock
+    CoreClockSwitch(hRmDevice, pCinfo, SrcIndex, reg, SrcFirst, CoreFreq);
+
+    // update core bus frequency
+    s_ClockSourceFreq[pCinfo->SourceId] = CoreFreq;
+    // CPU bridge divider output tracks the CPU bus - refresh its shadow too
+    if ((pCinfo->SourceId == NvRmClockSource_CpuBus) &&
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge))
+    {
+        s_ClockSourceFreq[NvRmClockSource_CpuBridge] = NvRmPrivDividerFreqGet(
+            hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBridge)->pInfo.pDivider);
+    }
+}
+
+/*****************************************************************************/
+
+/**
+ * GetSystemBusComplexHandle - lazily build and return the System bus complex
+ * descriptor derived from the AHB/APB (and optional VDE) divider descriptors.
+ *
+ * On first call (BusRateOffset still 0) the implied fixed divider
+ * configuration is asserted, and the shared control register offset plus
+ * the divisor field parameters are captured into s_SystemBusComplex;
+ * subsequent calls return the cached descriptor unchanged.
+ *
+ * @param hRmDevice RM device handle (unused after initialization).
+ * @return Pointer to the file-static System bus complex descriptor.
+ */
+static NvRmSystemBusComplexInfo*
+GetSystemBusComplexHandle(NvRmDeviceHandle hRmDevice)
+{
+    if (s_SystemBusComplex.BusRateOffset == 0)
+    {
+        NvU32 i, m;
+        const NvRmDividerClockInfo* pAhb =
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_Ahb)->pInfo.pDivider;
+        const NvRmDividerClockInfo* pApb =
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_Apb)->pInfo.pDivider;
+        NvOsMemset(&s_SystemBusComplex, 0, sizeof(s_SystemBusComplex));
+
+        // Confirm implied fixed AHB and APB dividers configuration and
+        // fill in other AHB and APB dividers parameters
+        NV_ASSERT(pAhb->Divider == NvRmClockDivider_Integer_1);
+        NV_ASSERT(pAhb->ClkControlField == pAhb->ClkDisableSettings);
+        NV_ASSERT(pApb->Divider == NvRmClockDivider_Integer_1);
+        NV_ASSERT(pApb->ClkControlField == pApb->ClkDisableSettings);
+        NV_ASSERT(pAhb->ClkControlOffset == pApb->ClkControlOffset);
+
+        s_SystemBusComplex.BusRateOffset = pAhb->ClkControlOffset;
+        s_SystemBusComplex.BusClockDisableFields =
+            pAhb->ClkControlField | pApb->ClkControlField;
+
+        // Field size = bit position of the highest set mask bit + 1
+        s_SystemBusComplex.HclkDivisorFieldShift = pAhb->ClkRateFieldShift;
+        s_SystemBusComplex.HclkDivisorFieldMask = pAhb->ClkRateFieldMask;
+        for (i = 0, m = pAhb->ClkRateFieldMask; (m >> i) != 0; i++);
+        s_SystemBusComplex.HclkDivisorFieldSize = i;
+
+        s_SystemBusComplex.PclkDivisorFieldShift = pApb->ClkRateFieldShift;
+        s_SystemBusComplex.PclkDivisorFieldMask = pApb->ClkRateFieldMask;
+        for (i = 0, m = pApb->ClkRateFieldMask; (m >> i) != 0; i++);
+        s_SystemBusComplex.PclkDivisorFieldSize = i;
+
+        // Confirm implied VDE divider configuration, and fill in VDE divider
+        // parameters provided System bus complex includes VDE clock; otherwise
+        // leave all VDE parameters cleared.
+        if (NvRmPrivGetClockSourceHandle(NvRmClockSource_Vbus))
+        {
+            const NvRmDividerClockInfo* pVbus =
+                NvRmPrivGetClockSourceHandle(NvRmClockSource_Vbus)->pInfo.pDivider;
+
+            NV_ASSERT(pVbus->Divider == NvRmClockDivider_Keeper16);
+            NV_ASSERT(pAhb->ClkControlOffset == pVbus->ClkControlOffset);
+
+            s_SystemBusComplex.VclkDividendFieldShift = pVbus->ClkRateFieldShift;
+            s_SystemBusComplex.VclkDividendFieldMask = pVbus->ClkRateFieldMask;
+            for (i = 0, m = pVbus->ClkRateFieldMask; (m >> i) != 0; i++);
+            s_SystemBusComplex.VclkDividendFieldSize = i;
+        }
+    }
+    return &s_SystemBusComplex;
+}
+
+/**
+ * Sets AHB, APB and (if present) VDE bus clock dividers derived from the
+ * System bus frequency, and updates the cached source frequency table.
+ *
+ * @param SystemFreq System bus frequency in kHz (divider input).
+ * @param pVclkFreq In/out: requested/achieved VDE clock in kHz; may be NULL
+ *  only when VDE clock is decoupled from the System bus.
+ * @param pHclkFreq In/out: requested/achieved AHB clock in kHz.
+ * @param pPclkFreq In/out: requested/achieved APB clock in kHz.
+ * @param PclkMaxFreq Maximum allowed APB frequency in kHz.
+ */
+void
+NvRmPrivBusClockFreqSet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz SystemFreq,
+ NvRmFreqKHz* pVclkFreq,
+ NvRmFreqKHz* pHclkFreq,
+ NvRmFreqKHz* pPclkFreq,
+ NvRmFreqKHz PclkMaxFreq)
+{
+ NvU32 VclkDividend, HclkDivisor, PclkDivisor, reg;
+ NvRmFreqKHz ClkFreq;
+ const NvRmSystemBusComplexInfo* pCinfo =
+ GetSystemBusComplexHandle(hRmDevice);
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(SystemFreq);
+ NV_ASSERT(pHclkFreq && pPclkFreq);
+
+ /*
+ * AHB clock divider: Fout = System Frequency / (n+1). Divider settings
+ * n = System Frequency / Hclk Frequency - 1. Avoid division for extreme
+ * cases of very small, or very large request via direct comparison.
+ */
+ ClkFreq = *pHclkFreq;
+ if ((ClkFreq << pCinfo->HclkDivisorFieldSize) <= SystemFreq)
+ {
+ HclkDivisor = pCinfo->HclkDivisorFieldMask;
+ *pHclkFreq = SystemFreq >> pCinfo->HclkDivisorFieldSize;
+ }
+ else if ((ClkFreq << 1) > SystemFreq)
+ {
+ HclkDivisor = 0;
+ *pHclkFreq = SystemFreq;
+ }
+ else
+ {
+ HclkDivisor = (SystemFreq / ClkFreq) - 1;
+ *pHclkFreq = SystemFreq / (HclkDivisor + 1);
+ }
+ s_ClockSourceFreq[NvRmClockSource_Ahb] = *pHclkFreq;
+
+ /*
+ * APB clock divider: Fout = AHB Frequency / (n+1). Divider settings
+ * n = AHB Frequency / Pclk Frequency - 1. Avoid division for extreme
+ * cases of very small, or very large request via direct comparison.
+ * Check against clock frequency maximum - this is the only bus clock
+ * that may have different (lower) maximum limit.
+ */
+ ClkFreq = *pPclkFreq;
+ NV_ASSERT(ClkFreq <= PclkMaxFreq);
+ if ((ClkFreq << pCinfo->PclkDivisorFieldSize) <= (*pHclkFreq))
+ {
+ PclkDivisor = pCinfo->PclkDivisorFieldMask;
+ *pPclkFreq = (*pHclkFreq) >> pCinfo->PclkDivisorFieldSize;
+ NV_ASSERT(*pPclkFreq <= PclkMaxFreq);
+ }
+ else if ((ClkFreq << 1) > (*pHclkFreq))
+ {
+ // Half-step divider only if undivided AHB would break the APB limit
+ PclkDivisor = ((*pHclkFreq) <= PclkMaxFreq)? 0 : 1;
+ *pPclkFreq = (*pHclkFreq) >> PclkDivisor;
+ }
+ else
+ {
+ // Prefer the smaller divisor when it still satisfies the APB limit
+ PclkDivisor = ((*pHclkFreq) / ClkFreq);
+ if ((*pHclkFreq) <= PclkMaxFreq * PclkDivisor)
+ PclkDivisor--;
+ *pPclkFreq = (*pHclkFreq) / (PclkDivisor + 1);
+ }
+ s_ClockSourceFreq[NvRmClockSource_Apb] = *pPclkFreq;
+
+ /*
+ * V-clock divider: Fout = System Frequency * (n + 1) / 2 ^ dividend size.
+ * Divider settings n = (Vclk Frequency << dividend size) / System Frequency.
+ * Do not subtract 1 as integer division would round down, anyway. If VDE
+ * clock is decoupled from the System bus, clear dividend and return 0 kHz.
+ */
+ if (pCinfo->VclkDividendFieldMask)
+ {
+ NV_ASSERT(pVclkFreq);
+ if ((*pVclkFreq) >= SystemFreq)
+ {
+ VclkDividend = pCinfo->VclkDividendFieldMask;
+ *pVclkFreq = SystemFreq;
+ }
+ else
+ {
+ VclkDividend =
+ ((*pVclkFreq) << pCinfo->VclkDividendFieldSize) / (SystemFreq + 1);
+ *pVclkFreq =
+ (SystemFreq * (VclkDividend + 1)) >> pCinfo->VclkDividendFieldSize;
+ }
+ s_ClockSourceFreq[NvRmClockSource_Vbus] = *pVclkFreq;
+ }
+ else
+ {
+ VclkDividend = 0;
+ if (pVclkFreq)
+ *pVclkFreq = 0;
+ s_ClockSourceFreq[NvRmClockSource_Vbus] = 0;
+ }
+
+ /*
+ * Set bus clocks dividers in bus rate control register.
+ * Always enable all bus clocks.
+ */
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->BusRateOffset);
+ reg &= ((~pCinfo->BusClockDisableFields) &
+ (~(pCinfo->HclkDivisorFieldMask << pCinfo->HclkDivisorFieldShift)) &
+ (~(pCinfo->PclkDivisorFieldMask << pCinfo->PclkDivisorFieldShift)) &
+ (~(pCinfo->VclkDividendFieldMask << pCinfo->VclkDividendFieldShift)));
+ reg |= ((HclkDivisor << pCinfo->HclkDivisorFieldShift) |
+ (PclkDivisor << pCinfo->PclkDivisorFieldShift) |
+ (VclkDividend << pCinfo->VclkDividendFieldShift));
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->BusRateOffset, reg);
+}
+
+/**
+ * Reads back AHB, APB and (if present) VDE clock frequencies derived from
+ * the current hardware divider settings and the given System bus frequency.
+ * pVclkFreq may be NULL only when VDE clock is decoupled from the System bus.
+ */
+void
+NvRmPrivBusClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz SystemFreq,
+ NvRmFreqKHz* pVclkFreq,
+ NvRmFreqKHz* pHclkFreq,
+ NvRmFreqKHz* pPclkFreq)
+{
+ NvU32 VclkDividend, HclkDivisor, PclkDivisor, reg;
+ const NvRmSystemBusComplexInfo* pCinfo =
+ GetSystemBusComplexHandle(hRmDevice);
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(SystemFreq);
+ NV_ASSERT(pHclkFreq && pPclkFreq);
+
+ // Get current bus dividers settings
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->BusRateOffset);
+ NV_ASSERT((reg & pCinfo->BusClockDisableFields) == 0);
+
+ HclkDivisor = (reg >> pCinfo->HclkDivisorFieldShift) & pCinfo->HclkDivisorFieldMask;
+ PclkDivisor = (reg >> pCinfo->PclkDivisorFieldShift) & pCinfo->PclkDivisorFieldMask;
+ VclkDividend = (reg >> pCinfo->VclkDividendFieldShift) & pCinfo->VclkDividendFieldMask;
+
+ /*
+ * AHB clock divider: Fout = System Frequency / (n+1). Avoid division
+ * for extreme cases of min/max divider values.
+ */
+ if (HclkDivisor == 0)
+ *pHclkFreq = SystemFreq;
+ else if (HclkDivisor == pCinfo->HclkDivisorFieldMask)
+ *pHclkFreq = SystemFreq >> pCinfo->HclkDivisorFieldSize;
+ else
+ *pHclkFreq = SystemFreq / (HclkDivisor + 1);
+
+ /*
+ * APB clock divider: Fout = AHB Frequency / (n+1). Avoid division
+ * for extreme cases of min/max divider values.
+ */
+ if (PclkDivisor == 0)
+ *pPclkFreq = *pHclkFreq;
+ else if (PclkDivisor == pCinfo->PclkDivisorFieldMask)
+ *pPclkFreq = (*pHclkFreq) >> pCinfo->PclkDivisorFieldSize;
+ else
+ *pPclkFreq = (*pHclkFreq) / (PclkDivisor + 1);
+
+ /*
+ * V-clock divider: Fout = System Frequency * (n + 1) / 2 ^ dividend size.
+ * If VDE clock is decoupled from the System bus, return 0 kHz.
+ */
+ if (pCinfo->VclkDividendFieldMask)
+ {
+ NV_ASSERT(pVclkFreq);
+ *pVclkFreq =
+ (SystemFreq * (VclkDividend + 1)) >> pCinfo->VclkDividendFieldSize;
+ }
+ else if (pVclkFreq)
+ *pVclkFreq = 0;
+}
+
+/*****************************************************************************/
+
+// Refreshes the cached frequency entry for the given PLL from the current
+// hardware settings.
+void
+NvRmPrivPllFreqUpdate(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo)
+{
+ NvRmFreqKHz PllKHz;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+
+ PllKHz = NvRmPrivAp15PllFreqGet(hRmDevice, pCinfo);
+ s_ClockSourceFreq[pCinfo->SourceId] = PllKHz;
+}
+
+// Refreshes the cached frequency entry for the given divider output from
+// the current hardware settings.
+void
+NvRmPrivDividerFreqUpdate(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo)
+{
+ NvRmFreqKHz DividerKHz;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+
+ DividerKHz = NvRmPrivDividerFreqGet(hRmDevice, pCinfo);
+ s_ClockSourceFreq[pCinfo->SourceId] = DividerKHz;
+}
+
+/**
+ * Enables the given divider and, for dividers with a variable divisor,
+ * programs the new rate setting; then waits for the change to propagate and
+ * refreshes the cached output frequency.
+ *
+ * @param setting Raw rate field value (masked by ClkRateFieldMask); ignored
+ *  for fixed-rate dividers.
+ */
+void
+NvRmPrivDividerSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo,
+ NvU32 setting)
+{
+ NvU32 reg;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+ NV_ASSERT(pCinfo->ClkControlOffset);
+
+ reg = NV_REGR(
+ hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkControlOffset);
+
+ // Make sure divider is enabled. Update rate field for divider with
+ // variable divisor
+ reg &= (~(pCinfo->ClkControlField));
+ reg |= pCinfo->ClkEnableSettings;
+ if (pCinfo->FixedRateSetting == NVRM_VARIABLE_DIVIDER)
+ {
+ reg &= (~(pCinfo->ClkRateFieldMask << pCinfo->ClkRateFieldShift));
+ reg |= ((setting & pCinfo->ClkRateFieldMask) << pCinfo->ClkRateFieldShift);
+ }
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkControlOffset, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+ s_ClockSourceFreq[pCinfo->SourceId] = NvRmPrivDividerFreqGet(hRmDevice, pCinfo);
+}
+
+/**
+ * Computes the divider output frequency in kHz from the current hardware
+ * settings and the cached input source frequency. Returns 0 kHz if the
+ * divider is disabled.
+ */
+NvRmFreqKHz
+NvRmPrivDividerFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo)
+{
+ NvRmFreqKHz DividerKHz;
+ NvU32 reg, n;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+ NV_ASSERT(pCinfo->ClkControlOffset);
+
+ reg = NV_REGR(
+ hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkControlOffset);
+
+ // Return 0 kHz if divider is disabled
+ if ((pCinfo->ClkControlField != 0) &&
+ ((reg & pCinfo->ClkControlField) == pCinfo->ClkDisableSettings))
+ {
+ return 0;
+ }
+ // Determine divider rate setting
+ n = pCinfo->FixedRateSetting;
+ if (n == NVRM_VARIABLE_DIVIDER)
+ {
+ n = ((reg >> pCinfo->ClkRateFieldShift) & pCinfo->ClkRateFieldMask);
+ }
+
+ // Calculate output frequency per divider type:
+ // Keeper16: Fout = Fin * (n + 1) / 16
+ // Skipper16: Fout = Fin * (16 - n) / 16
+ // Fractional_2: Fout = 2 * Fin / (n + 2)
+ // Integer_1: Fout = Fin / (n + 1)
+ // Integer: Fout = Fin / n
+ DividerKHz = s_ClockSourceFreq[pCinfo->InputId];
+ switch (pCinfo->Divider)
+ {
+ case NvRmClockDivider_Keeper16:
+ return ((DividerKHz * (n + 1)) >> 4);
+ case NvRmClockDivider_Skipper16:
+ return ((DividerKHz * (16 - n)) >> 4);
+ case NvRmClockDivider_Fractional_2:
+ n += 2;
+ DividerKHz = DividerKHz << 1;
+ break;
+ case NvRmClockDivider_Integer_1:
+ n += 1;
+ break;
+ case NvRmClockDivider_Integer:
+ break;
+ default:
+ NV_ASSERT(!"Invalid divider type");
+ return 0;
+ }
+ NV_ASSERT(n != 0);
+ return (DividerKHz / n);
+}
+
+
+
+// Shortcut (this mask can be retrieved from module clock information table)
+#define NVRM_FRACTIONAL_DIVISOR_FIELD_MASK (0xFF)
+
+/**
+ * Finds the fractional divider setting n (Fout = 2 * Fin / (n + 2)) that
+ * best approximates *pTargetKHz from above without exceeding MaxKHz.
+ * On return *pTargetKHz holds the frequency actually achieved.
+ *
+ * @return The divider setting n.
+ */
+NvU32
+NvRmPrivFindFreqMinAbove(
+ NvRmClockDivider DividerType,
+ NvRmFreqKHz SourceKHz,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pTargetKHz)
+{
+ NvU32 n;
+ NV_ASSERT(pTargetKHz);
+ NV_ASSERT( ((*pTargetKHz) != 0) && ((*pTargetKHz) <= MaxKHz) );
+ NV_ASSERT(DividerType == NvRmClockDivider_Fractional_2); // only this type
+
+ /*
+ * Get fractional divider setting n for the best target approximation from
+ * the above. Fractional divider: FoutKHz = (2 * FinKHz) / (n + 2)
+ */
+ if ((*pTargetKHz) < SourceKHz)
+ {
+ SourceKHz = SourceKHz << 1;
+ n = SourceKHz / (*pTargetKHz);
+ // Bump the divisor if the rounded-down result would exceed MaxKHz
+ if (SourceKHz > n * MaxKHz)
+ n++;
+ *pTargetKHz = SourceKHz / n;
+ n = n - 2;
+ }
+ else
+ {
+ // Source already at or below the target - pass it through (n = 0)
+ n = 0;
+ *pTargetKHz = SourceKHz;
+ }
+ NV_ASSERT(n <= NVRM_FRACTIONAL_DIVISOR_FIELD_MASK);
+ return n;
+}
+
+/**
+ * Finds the fractional divider setting n (Fout = 2 * Fin / (n + 2)) that
+ * best approximates *pTargetKHz from below. On return *pTargetKHz holds the
+ * frequency actually achieved.
+ *
+ * @return The divider setting n.
+ */
+NvU32
+NvRmPrivFindFreqMaxBelow(
+ NvRmClockDivider DividerType,
+ NvRmFreqKHz SourceKHz,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pTargetKHz)
+{
+ NvU32 setting = 0;
+
+ NV_ASSERT(pTargetKHz);
+ NV_ASSERT( ((*pTargetKHz) != 0) && ((*pTargetKHz) <= MaxKHz) );
+ NV_ASSERT(DividerType == NvRmClockDivider_Fractional_2); // only this type
+
+ if ((*pTargetKHz) >= SourceKHz)
+ {
+ // Source already at or below the target - pass it through (n = 0)
+ *pTargetKHz = SourceKHz;
+ }
+ else
+ {
+ // Round the divisor up so the output never exceeds the target
+ NvRmFreqKHz DoubledKHz = SourceKHz << 1;
+ NvU32 divisor = (DoubledKHz + (*pTargetKHz) - 1) / (*pTargetKHz);
+ *pTargetKHz = DoubledKHz / divisor;
+ setting = divisor - 2;
+ }
+ NV_ASSERT(setting <= NVRM_FRACTIONAL_DIVISOR_FIELD_MASK);
+ return setting;
+}
+
+/**
+ * Selects the specified input clock source in the given selector, optionally
+ * driving it through the doubler, and updates the cached output frequency.
+ *
+ * @param SourceId Input clock source to select (must be in pCinfo->Sources).
+ * @param Double NV_TRUE to enable the doubler (output = 2 * source);
+ *  NV_FALSE disables it (cached output reported as 0 kHz - no clock out).
+ */
+void
+NvRmPrivSelectorClockSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmSelectorClockInfo* pCinfo,
+ NvRmClockSource SourceId,
+ NvBool Double)
+{
+ NvU32 i, reg;
+ NvRmFreqKHz SourceFreq;
+ NvU32 SrcIndex = NvRmClockSource_Num; // source index out of valid range
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo);
+ NV_ASSERT(pCinfo->SelectorOffset);
+
+ // Find selector index for the specified input clock source
+ for (i = 0; i < NvRmClockSource_Num; i++)
+ {
+ if (pCinfo->Sources[i] == SourceId)
+ {
+ SrcIndex = i;
+ break;
+ }
+ }
+ NV_ASSERT(SrcIndex < NvRmClockSource_Num);
+
+ // Select specified clock source
+ NV_ASSERT(SrcIndex <= pCinfo->SourceFieldMask);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->SelectorOffset);
+ reg &= (~(pCinfo->SourceFieldMask << pCinfo->SourceFieldShift));
+ reg |= (SrcIndex << pCinfo->SourceFieldShift);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->SelectorOffset, reg);
+ SourceFreq = s_ClockSourceFreq[SourceId];
+
+ // Enable/Disable doubler
+ if (pCinfo->DoublerEnableField != 0)
+ {
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->DoublerEnableOffset);
+ if (Double)
+ {
+ reg |= pCinfo->DoublerEnableField;
+ SourceFreq = SourceFreq << 1;
+ }
+ else
+ {
+ reg &= (~pCinfo->DoublerEnableField);
+ SourceFreq = 0; // no clock out if doubler disabled
+ }
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->DoublerEnableOffset, reg);
+ }
+ s_ClockSourceFreq[pCinfo->SourceId] = SourceFreq;
+}
+
+/*****************************************************************************/
+
+/**
+ * Parses a SoC-specific clock source table of the given type into the
+ * destination table, indexed by source id. Each destination entry records
+ * the source id, its type, and a pointer to the type-specific descriptor.
+ * Asserts that no source id is registered twice.
+ */
+void NvRmPrivParseClockSources(
+ NvRmClockSourceInfo* pDst,
+ NvU32 DestinationTableSize,
+ NvRmClockSourceInfoPtr Src,
+ NvU32 SourceTableSize,
+ NvRmClockSourceType SourceType)
+{
+ NvU32 i;
+ NvRmClockSource id = NvRmClockSource_Invalid;
+ NV_ASSERT(pDst);
+
+ for (i = 0; i < SourceTableSize; i++)
+ {
+ // Based on specified source type retrieve source id
+ // from the source table
+ switch (SourceType)
+ {
+ case NvRmClockSourceType_Fixed:
+ id = Src.pFixed[i].SourceId;
+ pDst[id].pInfo.pFixed = &Src.pFixed[i];
+ break;
+ case NvRmClockSourceType_Pll:
+ id = Src.pPll[i].SourceId;
+ pDst[id].pInfo.pPll = &Src.pPll[i];
+ break;
+ case NvRmClockSourceType_Divider:
+ id = Src.pDivider[i].SourceId;
+ pDst[id].pInfo.pDivider = &Src.pDivider[i];
+ break;
+ case NvRmClockSourceType_Core:
+ id = Src.pCore[i].SourceId;
+ pDst[id].pInfo.pCore = &Src.pCore[i];
+ break;
+ case NvRmClockSourceType_Selector:
+ id = Src.pSelector[i].SourceId;
+ pDst[id].pInfo.pSelector = &Src.pSelector[i];
+ break;
+ default:
+ NV_ASSERT(!"Not defined source type");
+ }
+ // Fill in destination table
+ NV_ASSERT((NvU32)id < DestinationTableSize);
+ NV_ASSERT(pDst[id].SourceId == NvRmClockSource_Invalid);
+ pDst[id].SourceId = id;
+ pDst[id].SourceType = SourceType;
+ }
+}
+
+// Returns the descriptor for the given clock source, or NULL if the source
+// is not registered on this SoC.
+NvRmClockSourceInfo* NvRmPrivGetClockSourceHandle(NvRmClockSource id)
+{
+ NV_ASSERT((id != NvRmClockSource_Invalid) && (id < NvRmClockSource_Num));
+ if (s_ClockSourceTable[id].SourceId != id)
+ return NULL;
+
+ NV_ASSERT(s_ClockSourceTable[id].pInfo.pFixed);
+ return &s_ClockSourceTable[id];
+}
+
+/**
+ * Returns the cached frequency in kHz of the given clock source.
+ */
+NvRmFreqKHz
+NvRmPrivGetClockSourceFreq(NvRmClockSource id)
+{
+ NV_ASSERT(id < NvRmClockSource_Num);
+ return s_ClockSourceFreq[id];
+}
+
+/**
+ * Returns the cached main (oscillator) clock frequency in kHz.
+ * The device handle is not used by this implementation; it is kept for
+ * interface compatibility.
+ */
+NvRmFreqKHz
+NvRmPowerGetPrimaryFrequency(
+ NvRmDeviceHandle hRmDeviceHandle)
+{
+ return s_ClockSourceFreq[NvRmClockSource_ClkM];
+}
+
+/**
+ * Checks whether the given module instance currently has the specified
+ * clock source selected. Returns NV_FALSE if the module is not present.
+ * Modules without a source selector (ClkSourceOffset == 0) are compared
+ * against their first (index 0) source entry.
+ */
+NvBool
+NvRmPrivIsSourceSelectedByModule(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource SourceId,
+ NvRmModuleID ModuleId)
+{
+ NvError Error;
+ NvU32 SourceIndex = 0;
+ NvRmModuleClockInfo* pCinfo;
+ NvRmModuleInstance* pInst = NULL;
+ NV_ASSERT(hRmDevice);
+
+ Error = NvRmPrivGetModuleInstance(hRmDevice, ModuleId, &pInst);
+ if (Error != NvSuccess)
+ return NV_FALSE; // Module is not present - not using anything
+
+ pCinfo = (NvRmModuleClockInfo*)pInst->ModuleData;
+ if (pCinfo->ClkSourceOffset != 0)
+ {
+ SourceIndex = NV_REGR(
+ hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkSourceOffset);
+ SourceIndex =
+ (SourceIndex >> pCinfo->SourceFieldShift) & pCinfo->SourceFieldMask;
+ }
+ return (pCinfo->Sources[SourceIndex] == SourceId);
+}
+
+/**
+ * Determines whether an integer divisor (Fout = Fin / divisor) no larger
+ * than MaxDivisor can bring SourceFreq into [MinFreq, MaxFreq].
+ */
+NvBool
+NvRmIsFreqRangeReachable(
+ NvRmFreqKHz SourceFreq,
+ NvRmFreqKHz MinFreq,
+ NvRmFreqKHz MaxFreq,
+ NvU32 MaxDivisor)
+{
+ NvU32 MinDivisor;
+
+ NV_ASSERT(SourceFreq && MaxFreq);
+ NV_ASSERT(MinFreq <= MaxFreq);
+
+ // Smallest divisor that brings the source under the maximum boundary
+ MinDivisor = SourceFreq / MaxFreq;
+ if ((MinDivisor * MaxFreq) < SourceFreq)
+ MinDivisor++;
+
+ // Range is reachable only if that divisor fits the divider field...
+ if (MinDivisor > MaxDivisor)
+ return NV_FALSE;
+ // ...and the divided output is still at or above the minimum boundary
+ if ((MinDivisor * MinFreq) > SourceFreq)
+ return NV_FALSE;
+ return NV_TRUE;
+}
+
+/**
+ * Returns the SoC clock limits record for the given module.
+ */
+const NvRmModuleClockLimits*
+NvRmPrivGetSocClockLimits(NvRmModuleID Module)
+{
+ NV_ASSERT(Module < NvRmPrivModuleID_Num);
+ return &s_ModuleClockLimits[Module];
+}
+
+// Acquires the mutex serializing access to shared PLL configuration.
+void NvRmPrivLockSharedPll(void)
+{
+ NvOsMutexLock(s_hPllMutex);
+}
+
+// Releases the mutex serializing access to shared PLL configuration.
+void NvRmPrivUnlockSharedPll(void)
+{
+ NvOsMutexUnlock(s_hPllMutex);
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+// Usage-policy tables: the modules permitted to select each PLL as a clock
+// source. Consulted by GetPolicySourceToModuleList() below.
+
+// PLLC may be selected as a source only for Display, TVO, GPU, and VDE
+// modules. (It is also used for CPU and System/Avp core clocks, controlled
+// by DFS with its own configuration path - no need to specify here)
+static const NvRmModuleID s_Ap15PllC0UsagePolicy[] =
+{
+ NvRmModuleID_Display,
+ NvRmModuleID_3D,
+ NvRmModuleID_2D,
+ NvRmModuleID_Mpe,
+};
+
+// AP20 additionally allows TVO and VDE on PLLC
+static const NvRmModuleID s_Ap20PllC0UsagePolicy[] =
+{
+ NvRmModuleID_Display,
+ NvRmModuleID_Tvo,
+ NvRmModuleID_3D,
+ NvRmModuleID_2D,
+ NvRmModuleID_Mpe,
+ NvRmModuleID_Vde
+};
+
+// PLLM may be selected as a source for GPU, UART and VDE modules. (It is also
+// used for EMC, CPU and System/Avp core clocks, controlled by DFS with its
+// own configuration path - no need to specify here)
+static const NvRmModuleID s_Ap15PllM0UsagePolicy[] =
+{
+ NvRmModuleID_GraphicsHost,
+ NvRmModuleID_Vi,
+ NvRmModuleID_3D,
+ NvRmModuleID_2D,
+ NvRmModuleID_Epp,
+ NvRmModuleID_Mpe,
+ NvRmModuleID_Vde,
+ NvRmModuleID_Uart
+};
+
+// PLLD may be selected as a source only for Display, HDMI, and DSI modules.
+static const NvRmModuleID s_Ap15PllD0UsagePolicy[] =
+{
+ NvRmModuleID_Display,
+ NvRmModuleID_Hdmi,
+ NvRmModuleID_Dsi
+};
+
+// PLLA may be selected as a source only for I2S and SPDIF modules.
+static const NvRmModuleID s_Ap15PllA0UsagePolicy[] =
+{
+ NvRmModuleID_I2s,
+ NvRmModuleID_Spdif,
+};
+
+/**
+ * Returns the usage-policy list of modules allowed to use the specified
+ * clock source, and its size via pListSize. Returns NULL (size 0) when no
+ * policy restricts the source.
+ */
+static const NvRmModuleID*
+GetPolicySourceToModuleList(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource SourceId,
+ NvU32* pListSize)
+{
+ NV_ASSERT(hRmDevice && pListSize);
+
+ // Unless explicitly overwritten, use AP15 policy as a base for all SoCs;
+ // return list of modules that may use specified source
+ switch (SourceId)
+ {
+ case NvRmClockSource_PllC0:
+ if (hRmDevice->ChipId.Id == 0x20)
+ {
+ *pListSize = NV_ARRAY_SIZE(s_Ap20PllC0UsagePolicy);
+ return s_Ap20PllC0UsagePolicy;
+ }
+ *pListSize = NV_ARRAY_SIZE(s_Ap15PllC0UsagePolicy);
+ return s_Ap15PllC0UsagePolicy;
+
+ case NvRmClockSource_PllM0:
+ *pListSize = NV_ARRAY_SIZE(s_Ap15PllM0UsagePolicy);
+ return s_Ap15PllM0UsagePolicy;
+
+ case NvRmClockSource_PllD0:
+ *pListSize = NV_ARRAY_SIZE(s_Ap15PllD0UsagePolicy);
+ return s_Ap15PllD0UsagePolicy;
+
+ case NvRmClockSource_PllA0:
+ case NvRmClockSource_AudioSync:
+ *pListSize = NV_ARRAY_SIZE(s_Ap15PllA0UsagePolicy);
+ return s_Ap15PllA0UsagePolicy;
+
+ default:
+ *pListSize = 0;
+ return NULL; // No policy - any module may use the source
+ }
+}
+
+/**
+ * Checks whether policy prevents the given module from using the specified
+ * clock source. Returns NV_TRUE when the source is protected for this
+ * module, NV_FALSE when the module may use it.
+ */
+NvBool
+NvRmPrivIsSourceProtected(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmClockSource SourceId)
+{
+ NvU32 i, ListSize;
+ const NvRmModuleID* pModuleList = GetPolicySourceToModuleList(
+ hRmDevice, SourceId, &ListSize);
+
+ if (pModuleList == NULL)
+ {
+ // No policy for this source - just make sure I2C modules run on the
+ // main clock only
+ if ((SourceId != NvRmClockSource_ClkM) &&
+ ((Module == NvRmModuleID_Dvc) || (Module == NvRmModuleID_I2c)))
+ {
+ return NV_TRUE;
+ }
+ return NV_FALSE;
+ }
+
+ // Policy in place - the source is protected unless the module is listed
+ NV_ASSERT(ListSize);
+ for (i = 0; i < ListSize; i++)
+ {
+ if (pModuleList[i] == Module)
+ return NV_FALSE;
+ }
+ return NV_TRUE;
+}
+
+/*****************************************************************************/
+
+/**
+ * Reconfigures dedicated CPU PLLX to the target frequency. If PLLX is close
+ * enough below the target already (within CPU divider resolution), nothing
+ * is done. The caller is expected to complete CPU clock configuration
+ * afterwards (PLLX is used for CPU only, so the caller is CPU DVFS).
+ */
+void
+NvRmPrivReConfigurePllX(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz TargetFreq)
+{
+ NvRmClockSource SourceId;
+ NvRmFreqKHz f = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllX0);
+ const NvRmCoreClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+ NvRmFreqKHz MaxFreq = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+
+ NV_ASSERT(NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0));
+ NV_ASSERT(TargetFreq <= MaxFreq);
+
+ // Do nothing if current PLLX frequency is below
+ // and close enough to the target
+ if (f <= TargetFreq) // if below - DVS-safe
+ {
+ f += (MaxFreq >> pCinfo->DivisorFieldSize); // CPU divider resolution
+ if (f >= TargetFreq)
+ return;
+ }
+
+ /*
+ * If PLLX is in use by CPU switch CPU to back-up PLLP0 source during PLLX
+ * reconfiguration. This is DVS safe as per DFS policy, PLLX is used for
+ * high frequencies above PLLP0 output. In any case, configure PLLX target
+ * frequency, and let the caller to complete CPU clock configuration (PLLX
+ * is used for CPU only, so the caller is always CPU DVFS)
+ */
+ SourceId = NvRmPrivCoreClockSourceGet(hRmDevice, pCinfo);
+ if (SourceId == NvRmClockSource_PllX0)
+ {
+ SourceId = NvRmClockSource_PllP0;
+ f = NvRmPrivGetClockSourceFreq(SourceId);
+ NV_ASSERT(f <= MaxFreq);
+ NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+ hRmDevice, pCinfo, MaxFreq, &f, &SourceId));
+ }
+ SourceId = NvRmClockSource_PllX0;
+ NvRmPrivAp15PllConfigureSimple(hRmDevice, SourceId, TargetFreq, &TargetFreq);
+}
+
+/*****************************************************************************/
+
+// Switches the module clock to BackupSource via its selector field,
+// temporarily enabling the module clock around the switch if it was
+// disabled. The module clock state record is not modified, so the original
+// configuration can be restored later via RestoreClockSource().
+static void BackupClockSource(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleClockInfo* pCinfo,
+ NvRmClockSource BackupSource)
+{
+ NvBool Disabled;
+ NvU32 reg, SourceIndex;
+ NvRmModuleID ModuleId;
+
+ NV_ASSERT(pCinfo);
+ ModuleId = NVRM_MODULE_ID(pCinfo->Module, pCinfo->Instance);
+
+ // Check if currently clock is disabled
+ NV_ASSERT(pCinfo->ClkEnableOffset);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkEnableOffset);
+ Disabled = ((reg & pCinfo->ClkEnableField) != pCinfo->ClkEnableField);
+
+ // Find backup source index
+ for (SourceIndex = 0; SourceIndex < NvRmClockSource_Num; SourceIndex++)
+ {
+ if (pCinfo->Sources[SourceIndex] == BackupSource)
+ break;
+ }
+ NV_ASSERT(SourceIndex < NvRmClockSource_Num);
+
+ // Switch module to backup source clock. If module clock is disabled,
+ // temporarily enable it.
+ if (Disabled)
+ {
+ NvRmPrivEnableModuleClock(hRmDevice, ModuleId, ModuleClockState_Enable);
+ }
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkSourceOffset);
+ reg &= (~(pCinfo->SourceFieldMask << pCinfo->SourceFieldShift));
+ reg |= (SourceIndex << pCinfo->SourceFieldShift);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkSourceOffset, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+ if (Disabled)
+ {
+ NvRmPrivEnableModuleClock(hRmDevice, ModuleId, ModuleClockState_Disable);
+ }
+}
+
+// Re-applies the module clock configuration recorded in pCstate (temporarily
+// enabling the module clock around the update if it was disabled), then
+// re-attaches module voltage scaling requirements for the new source
+// frequency.
+static void RestoreClockSource(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate,
+ NvRmFreqKHz NewSourceFreq)
+{
+ NvU32 reg;
+ NvBool Disabled;
+ NvRmModuleID ModuleId;
+
+ NV_ASSERT(pCinfo && pCstate);
+ ModuleId = NVRM_MODULE_ID(pCinfo->Module, pCinfo->Instance);
+
+ // Check if currently clock is disabled
+ NV_ASSERT(pCinfo->ClkEnableOffset);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkEnableOffset);
+ Disabled = ((reg & pCinfo->ClkEnableField) != pCinfo->ClkEnableField);
+
+ // Restore module clock source If module clock is disabled, temporarily
+ // enable it. Update module v-scale requirements.
+ if (Disabled)
+ {
+ NvRmPrivEnableModuleClock(hRmDevice, ModuleId, ModuleClockState_Enable);
+ }
+ NvRmPrivModuleClockSet(hRmDevice, pCinfo, pCstate);
+ if (Disabled)
+ {
+ NvRmPrivEnableModuleClock(hRmDevice, ModuleId, ModuleClockState_Disable);
+ }
+ NvRmPrivModuleVscaleReAttach(
+ hRmDevice, pCinfo, pCstate, pCstate->actual_freq, NewSourceFreq);
+}
+
+// For every instance of Module (and its subclocks, found by walking the
+// adjacent clock info/state records), switches any clock currently on
+// UpdatedSource over to BackupSource. State records are preserved so the
+// original configuration can be restored by RestoreModuleClocks().
+static void BackupModuleClocks(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmClockSource UpdatedSource,
+ NvRmClockSource BackupSource)
+{
+ NvU32 j;
+ NvBool SubClock = NV_FALSE;
+ NvRmModuleClockInfo* pCinfo = NULL;
+ NvRmModuleClockState* pCstate = NULL;
+
+ for (j = NvRmModuleGetNumInstances(hRmDevice, Module); j != 0; j--)
+ {
+ NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
+ hRmDevice, NVRM_MODULE_ID(Module, j-1), &pCinfo, &pCstate));
+ do
+ {
+ // If on updated source, switch module to backup source. Note
+ // that module clock state records are preserved and will be used
+ // to restore clock configuration after source update completed.
+ NV_ASSERT(NvRmPrivGetClockSourceFreq(BackupSource) <=
+ NvRmPrivGetClockSourceFreq(UpdatedSource));
+ if (pCinfo->Sources[pCstate->SourceClock] == UpdatedSource)
+ BackupClockSource(hRmDevice, pCinfo, BackupSource);
+
+ // Check if module subclock should be backed up as well
+ // TODO: boundary check
+ pCinfo++;
+ pCstate++;
+ SubClock = (pCinfo->Module == Module) &&
+ (pCinfo->Instance == (j - 1));
+ } while (SubClock);
+ }
+}
+
+// For every instance of Module (and its subclocks), restores clocks that
+// were on UpdatedSource: recomputes the divider for the new source output
+// (as close to/above the previous frequency as possible) and re-applies the
+// recorded clock state.
+static void
+RestoreModuleClocks(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmClockSource UpdatedSource,
+ NvRmFreqKHz NewSourceFreq)
+{
+ NvU32 j;
+ NvRmFreqKHz MaxFreq;
+ NvBool SubClock = NV_FALSE;
+ NvRmModuleClockInfo* pCinfo = NULL;
+ NvRmModuleClockState* pCstate = NULL;
+
+ MaxFreq = NvRmPrivGetSocClockLimits(Module)->MaxKHz;
+ for (j = NvRmModuleGetNumInstances(hRmDevice, Module); j != 0; j--)
+ {
+ NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
+ hRmDevice, NVRM_MODULE_ID(Module, j-1), &pCinfo, &pCstate));
+ do
+ {
+ // Restore updated module clock source, and set divider to get as
+ // close/above to previous frequency as new source output allows.
+ if (pCinfo->Sources[pCstate->SourceClock] == UpdatedSource)
+ {
+ pCstate->Divider = NvRmPrivFindFreqMinAbove(
+ pCinfo->Divider, NewSourceFreq, MaxFreq, &pCstate->actual_freq);
+ RestoreClockSource(hRmDevice, pCinfo, pCstate, NewSourceFreq);
+ }
+
+ // Check if module subclock should be backed up as well
+ // TODO: boundary check
+ pCinfo++;
+ pCstate++;
+ SubClock = (pCinfo->Module == Module) &&
+ (pCinfo->Instance == (j - 1));
+ } while (SubClock);
+ }
+}
+
+/*****************************************************************************/
+
+// Switches all modules allowed to use PLLC0 over to the PLLP0 backup source
+// before PLLC reconfiguration.
+static void PllCBackupModuleClocks(NvRmDeviceHandle hRmDevice)
+{
+ NvU32 i, ListSize;
+ NvRmModuleID Module;
+ const NvRmModuleID* pModuleList = GetPolicySourceToModuleList(
+ hRmDevice, NvRmClockSource_PllC0, &ListSize);
+ NV_ASSERT(pModuleList && ListSize);
+
+ // Check all modules that can use PLLC0 as a source, and switch to PLLP0
+ // as a backup source
+ for (i = 0; i < ListSize; i++)
+ {
+ Module = pModuleList[i];
+ BackupModuleClocks(
+ hRmDevice, Module, NvRmClockSource_PllC0, NvRmClockSource_PllP0);
+ }
+}
+
+// Restores PLLC0-sourced module clocks after PLLC reconfiguration, using
+// the new PLLC0 output frequency. Display is skipped - PLLC is adjusted as
+// part of display configuration itself.
+static void
+PllCRestoreModuleClocks(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz NewPllCFreq)
+{
+ NvU32 i, ListSize;
+ NvRmModuleID Module;
+ const NvRmModuleID* pModuleList = GetPolicySourceToModuleList(
+ hRmDevice, NvRmClockSource_PllC0, &ListSize);
+ NV_ASSERT(pModuleList && ListSize);
+
+ // Check all modules that can use PLLC0 as a source, and restore source
+ // configuration
+ for (i = 0; i < ListSize; i++)
+ {
+ // Skip display (PLLC is adjusted as part of display configuration)
+ Module = pModuleList[i];
+ if (Module == NvRmModuleID_Display)
+ continue;
+
+ RestoreModuleClocks(
+ hRmDevice, Module, NvRmClockSource_PllC0, NewPllCFreq);
+ }
+}
+
+// If the CPU bus is currently clocked from PLLC0, switches it to PLLP0 and
+// returns the previous CPU frequency (to be restored later); returns 0 if
+// no restoration is needed.
+static NvRmFreqKHz PllCBackupCpuClock(NvRmDeviceHandle hRmDevice)
+{
+ NvRmClockSource SourceId;
+ NvRmFreqKHz OldCpuFreq = 0;
+ const NvRmCoreClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+
+ // If PLLC0 is used as a source for CPU clock - switch CPU to PLLP0, and
+ // return saved CPU clock frequency (to be restored). Note that DVFS uses
+ // PLLC0 as a source only for frequencies above PLLP0
+ SourceId = NvRmPrivCoreClockSourceGet(hRmDevice, pCinfo);
+ if (SourceId == NvRmClockSource_PllC0)
+ {
+ OldCpuFreq = NvRmPrivGetClockSourceFreq(NvRmClockSource_CpuBus);
+ NV_ASSERT(NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0) <=
+ OldCpuFreq);
+ NV_ASSERT(NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0) <=
+ NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz);
+ NvRmPrivCoreClockSet(hRmDevice, pCinfo, NvRmClockSource_PllP0, 0, 0);
+ }
+ return OldCpuFreq; // frequency for restoration, or 0 if no restoration
+}
+
+// Restores the CPU clock after PLLC reconfiguration, as high as the new
+// PLLC0 output allows (but no higher than the saved frequency), provided
+// PLLC0 was used as a source for CPU (OldCpuFreq != 0).
+static void
+PllCRestoreCpuClock(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz NewPllCFreq,
+ NvRmFreqKHz OldCpuFreq)
+{
+ // Restore CPU clock as high as new PLLC0 output allows, provided PLLC0
+ // was used as a source for CPU
+ if (OldCpuFreq != 0)
+ {
+ NvRmClockSource SourceId = NvRmClockSource_PllC0;
+ NvRmFreqKHz CpuFreq = NV_MIN(NewPllCFreq, OldCpuFreq);
+ const NvRmCoreClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+ NvRmFreqKHz MaxFreq =
+ NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+
+ NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+ hRmDevice, pCinfo, MaxFreq, &CpuFreq, &SourceId));
+ }
+}
+
+// If the System bus is currently clocked from the PLLC1 divider, switches
+// it to PLLP2 and returns the previous System frequency (to be restored
+// later); returns 0 if no restoration is needed.
+static NvRmFreqKHz PllCBackupSystemClock(NvRmDeviceHandle hRmDevice)
+{
+ NvRmClockSource SourceId;
+ NvRmFreqKHz OldSysFreq = 0;
+ const NvRmCoreClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+
+ // If PLLC1 divider output is used as a source for System clock - switch
+ // System clock to PLLP2, and return saved System clock frequency (to
+ // be restored). Note that DVFS uses PLLC1 as a source starting with AP20
+ SourceId = NvRmPrivCoreClockSourceGet(hRmDevice, pCinfo);
+ if (SourceId == NvRmClockSource_PllC1)
+ {
+ OldSysFreq = NvRmPrivGetClockSourceFreq(NvRmClockSource_SystemBus);
+ NV_ASSERT(hRmDevice->ChipId.Id >= 0x20);
+ NV_ASSERT(NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP2) <=
+ NvRmPrivGetSocClockLimits(NvRmPrivModuleID_System)->MaxKHz);
+ NvRmPrivCoreClockSet(hRmDevice, pCinfo, NvRmClockSource_PllP2, 0, 0);
+ }
+ return OldSysFreq; // frequency for restoration, or 0 if no restoration
+}
+
+// Reconfigures the PLLC1 divider for the new PLLC output and, if the System
+// bus was previously sourced from PLLC1 (OldSysFreq != 0), restores the
+// System clock (and bus clocks) as high as the new PLLC1 output allows.
+static void
+PllCRestoreSystemClock(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz NewPllCFreq,
+ NvRmFreqKHz OldSysFreq)
+{
+ NvU32 divc1;
+ NvRmFreqKHz SysFreq;
+ const NvRmClockSourceInfo* pSrcCinfo;
+ NvRmClockSource SourceId = NvRmClockSource_PllC1;
+ NvRmFreqKHz MaxFreq =
+ NvRmPrivGetSocClockLimits(NvRmPrivModuleID_System)->MaxKHz;
+
+ // Reconfigure PLLC1 divider at maximum possible frequency
+ SysFreq = MaxFreq;
+ divc1 = NvRmPrivFindFreqMaxBelow(
+ NvRmClockDivider_Fractional_2, NewPllCFreq, MaxFreq, &SysFreq);
+ pSrcCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_PllC1);
+ NvRmPrivDividerSet(hRmDevice, pSrcCinfo->pInfo.pDivider, divc1);
+
+ // Restore System clock as high as new PLLC1 output allows, provided PLLC1
+ // was used as a source for System clock
+ if (OldSysFreq != 0)
+ {
+ SysFreq = NV_MIN(SysFreq, OldSysFreq);
+ pSrcCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus);
+ NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+ hRmDevice, pSrcCinfo->pInfo.pCore, MaxFreq, &SysFreq, &SourceId));
+ NvRmPrivBusClockInit(hRmDevice, SysFreq);
+ }
+}
+
+// Returns the PLLC maximum output frequency in kHz: a fixed default when a
+// dedicated CPU PLLX exists, otherwise the CPU maximum frequency (since
+// PLLC is then a primary CPU source).
+NvRmFreqKHz NvRmPrivGetMaxFreqPllC(NvRmDeviceHandle hRmDevice)
+{
+ if (NvRmPrivGetClockSourceHandle(NvRmClockSource_PllX0) != NULL)
+ return NVRM_PLLC_DEFAULT_FREQ_KHZ;
+
+ return NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+}
+
+/*
+ * PLLC is reconfigured:
+ * (a) when RM is setting fast clocks during boot/resume from deep sleep,
+ * provided PLLC is not already in use by any of the display heads
+ * (b) when DDK/ODM is reconfiguring display clock (typically PLLC is required
+ * for CRT)
+ *
+ * In both cases core voltage is set at nominal - reconfiguration is DVS-safe.
+ * Core clocks that use PLLC: CPU and System bus (starting with AP20) - are
+ * switched to PLLP during reconfiguration and restored afterwards. Module
+ * clocks that use PLLC are backed up to PLLP and then restored as well, with
+ * the exception of display, which does not need restoration in a process of
+ * reconfiguration (case b).
+ */
+/**
+ * Reconfigures PLLC to TargetFreq (or to its maximum limit when
+ * NvRmFreqMaximum is passed), backing up and restoring the CPU, System and
+ * module clocks that source from PLLC around the change. See the policy
+ * comment above for the reconfiguration scenarios.
+ */
+void
+NvRmPrivReConfigurePllC(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz TargetFreq)
+{
+ NvRmFreqKHz CpuFreq, SysFreq, MaxFreq;
+ const NvRmCoreClockInfo* pCinfo =
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+
+ // If maximum PLLC target is requested, and current PLLC output is close
+ // enough - exit without adjusting PLLC (use CPU divider resolution as
+ // "close enough" criteria). For specific PLLC target, find multiple of
+ // target frequency as close as possible to PLLC maximum limit.
+ MaxFreq = NvRmPrivGetMaxFreqPllC(hRmDevice);
+ if (TargetFreq == NvRmFreqMaximum)
+ {
+ TargetFreq = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+ if (TargetFreq <= MaxFreq)
+ {
+ TargetFreq += (MaxFreq >> pCinfo->DivisorFieldSize);
+ if (TargetFreq >= MaxFreq)
+ return;
+ }
+ TargetFreq = MaxFreq;
+ }
+ NV_ASSERT(TargetFreq <= MaxFreq);
+ NV_ASSERT((TargetFreq * NVRM_DISPLAY_DIVIDER_MAX) >= MaxFreq);
+ TargetFreq = (MaxFreq / TargetFreq) * TargetFreq;
+
+ // Backup core and/or module clocks that are using PLLC as a clock source
+ // at the moment. Reconfigure PLLC to the new target, and restore backuped
+ // clocks as close as possible with the new PLLC output frequency
+ CpuFreq = PllCBackupCpuClock(hRmDevice);
+ SysFreq = PllCBackupSystemClock(hRmDevice);
+ PllCBackupModuleClocks(hRmDevice);
+
+ NvRmPrivAp15PllConfigureSimple(
+ hRmDevice, NvRmClockSource_PllC0, TargetFreq, &TargetFreq);
+
+ PllCRestoreCpuClock(hRmDevice, TargetFreq, CpuFreq);
+ PllCRestoreSystemClock(hRmDevice, TargetFreq, SysFreq);
+ PllCRestoreModuleClocks(hRmDevice, TargetFreq);
+
+#if !NV_OAL
+ // Resync DFS as PLLC may be reconfigured for display "behind DFS back"
+ if (NvRmPrivGetExecPlatform(hRmDevice) == ExecPlatform_Soc)
+ NvRmPrivDfsResync();
+#endif
+}
+
+// Boosts PLLC to maximum output, provided neither display head currently
+// uses PLLC0 as its pixel clock source.
+void NvRmPrivBoostPllC(NvRmDeviceHandle hRmDevice)
+{
+ // Boost PLLC to maximum output, if it is not used as pixel clock source
+ if (!NvRmPrivIsSourceSelectedByModule(hRmDevice, NvRmClockSource_PllC0,
+ NVRM_MODULE_ID(NvRmModuleID_Display, 0)) &&
+ !NvRmPrivIsSourceSelectedByModule(hRmDevice, NvRmClockSource_PllC0,
+ NVRM_MODULE_ID(NvRmModuleID_Display, 1))
+ )
+ NvRmPrivReConfigurePllC(hRmDevice, NvRmFreqMaximum);
+}
+
+/*****************************************************************************/
+
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_diag.c b/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_diag.c
new file mode 100644
index 000000000000..f8b1b7322232
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap15/nvrm_diag.c
@@ -0,0 +1,1376 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_diag.h"
+#include "nvrm_clocks.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_pmu.h"
+#include "nvrm_pmu_private.h"
+#include "nvodm_query_discovery.h"
+#include "ap15rm_private.h"
+#include "ap15/ap15rm_clocks.h"
+#include "ap20/ap20rm_clocks.h"
+#include "ap15/project_relocation_table.h"
+
+#if (NV_DEBUG)
+#define NVRM_DIAG_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_DIAG_PRINTF(x)
+#endif
+
+// TODO: remove this define when it is added to re-location table header
+#if !defined(NV_POWERGROUP_INVALID)
+#define NV_POWERGROUP_INVALID (0xFFFF)
+#endif
+
/*
 * Holds mapping information between diagnostic module Ids and pointers to
 * clock information structures
 */
typedef struct DiagModuleMappingRec
{
    // Index mapping diagnostic module Id into the base pointer to the
    // respective module clock information structure
    NvU32 BaseIndex;

    // Total number of the module instances
    NvU32 InstancesNum;
} DiagModuleMapping;

/*
 * Combines modules diagnostic information
 */
typedef struct NvRmDiagModulesRec
{
    // Size of module information table
    NvU32 ModuleClockTableSize;

    // Module clock and reset information table
    const NvRmModuleClockInfo* ModuleClockTable;

    // Table of module instance pointers into the information table,
    // laid out as [BaseIndex + instance] per DiagModuleMapping
    const NvRmModuleClockInfo** pInstancePtrs;

    // Mapping indexes of module instances, keyed by diagnostic module ID
    DiagModuleMapping InstancesMap[NvRmDiagModuleID_Num];
} NvRmDiagModules;
+
/*
 * Combines clock sources diagnostic information
 */
typedef struct NvRmDiagSourcesRec
{
    // Total number of available clock sources
    NvU32 ClockSourcesNum;

    // Map between clock source IDs and handles (NULL = source not present)
    NvRmDiagClockSourceHandle hSources[NvRmClockSource_Num];
} NvRmDiagSources;

// RM handle for diagnostic mode; non-NULL iff NvRmDiagEnable() has run.
// NOTE(review): looks file-local by naming convention — consider declaring
// it 'static' if no other translation unit references it; confirm first.
NvRmDeviceHandle s_hDiagRm = NULL;

/*
 * Holds mapping information between power rails and module power
 * groups
 */
typedef struct NvRmDiagPowerRailRec
{
    // Power rail GUID
    NvU64 PowerRailId;

    // List of power group IDs mapped to this rail, terminated
    // by invalid power group ID
    NvU32 PowerRailGroups[NV_POWERGROUP_MAX + 1];
} NvRmDiagPowerRail;

/*
 * Combines power rails diagnostic information
 */
typedef struct NvRmDiagRailsRec
{
    // Total number of available module rails
    NvU32 PowerRailsNum;

    // Power Rails information table
    const NvRmDiagPowerRail* PowerRailsTable;

    // Combined Module ID and instance of the PMU communication
    // interface controller
    NvRmDiagModuleID PmuBusHostDiagId;
    NvRmModuleID PmuBusHostRmId;
} NvRmDiagRails;
+
+/*****************************************************************************/
+
+static const NvRmDiagPowerRail s_Ap15PowerRailsTable[] =
+{
+ {
+ NV_VDD_RTC_ODM_ID,
+ {
+ NV_POWERGROUP_AO,
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_CORE_ODM_ID,
+ {
+ NV_POWERGROUP_NPG,
+ NV_POWERGROUP_CPU,
+ NV_POWERGROUP_TD,
+ NV_POWERGROUP_VE,
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_PLLA_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLM_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLP_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLU_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLU1_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLHDMI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_OSC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_SYS_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_USB_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_HDMI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_MIPI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_LCD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_AUD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_DDR_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_NAND_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_UART_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_SDIO_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_VDAC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_VI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_BB_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ }
+};
+static const NvU32 s_Ap15PowerRailsTableSize = NV_ARRAY_SIZE(s_Ap15PowerRailsTable);
+
+static const NvRmDiagPowerRail s_Ap20PowerRailsTable[] =
+{
+ {
+ NV_VDD_RTC_ODM_ID,
+ {
+ NV_POWERGROUP_AO,
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_CORE_ODM_ID,
+ {
+ NV_POWERGROUP_NPG,
+ NV_POWERGROUP_TD,
+ NV_POWERGROUP_VE,
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_CPU_ODM_ID,
+ {
+ NV_POWERGROUP_CPU,
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_PLLA_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLM_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLP_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLU_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLU1_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLHDMI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_PLLX_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_OSC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+
+ {
+ NV_VDD_SYS_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_USB_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_HDMI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_MIPI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_LCD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_AUD_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_DDR_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_NAND_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_UART_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_SDIO_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_VDAC_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_VI_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ },
+ {
+ NV_VDD_BB_ODM_ID,
+ {
+ NV_POWERGROUP_INVALID
+ }
+ }
+};
+static const NvU32 s_Ap20PowerRailsTableSize = NV_ARRAY_SIZE(s_Ap20PowerRailsTable);
+
// Clock source names, indexed by NvRmClockSource ID. Each 8-character name
// is packed big-endian into one NvU64 (first character in the most
// significant byte); entry 0 stands for the invalid source.
static const NvU64 s_ApClockSourceNames[] =
{
    0x0,
// X-macro: nvrm_clockids.h expands NVRM_CLOCK_SOURCE once per source,
// supplying the eight name characters A..H which are packed here.
#define NVRM_CLOCK_SOURCE(A, B, C, D, E, F, G, H, x) \
((NvU64)((((A)&0xFFULL)<<56) | \
         (((B)&0xFFULL)<<48) | \
         (((C)&0xFFULL)<<40) | \
         (((D)&0xFFULL)<<32) | \
         (((E)&0xFFULL)<<24) | \
         (((F)&0xFFULL)<<16) | \
         (((G)&0xFFULL)<<8) | \
         (((H)&0xFFULL))) ),
    #include "nvrm_clockids.h"
#undef NVRM_CLOCK_SOURCE
};
+
// Power rails diagnostic information
// NOTE(review): s_Rails/s_Modules/s_Sources look file-local by naming
// convention; consider 'static' if no other translation unit links to
// them — confirm against the rest of the codebase.
NvRmDiagRails s_Rails = {0};

// Modules diagnostic information
NvRmDiagModules s_Modules = {0};

// Clock sources diagnostic information
NvRmDiagSources s_Sources = {0};

/*****************************************************************************/

// Maps a combined diagnostic module ID (+instance) to the common RM module ID
static NvRmModuleID MapDiagIdToRmId(NvRmDiagModuleID DiagId);
+
+/*****************************************************************************/
+
/**
 * Enables diagnostic mode: records the RM handle, selects the chip-specific
 * module clock and power rail tables, builds the module-instance lookup
 * index, maps clock source IDs to handles, and stops DFS so diagnostic
 * overrides are not fought by the governor.
 *
 * @param hRmDevice Valid RM device handle.
 * @retval NvSuccess Diagnostics enabled (also when already enabled).
 * @retval NvError_InsufficientMemory Instance pointer table allocation failed.
 */
NvError
NvRmDiagEnable(NvRmDeviceHandle hRmDevice)
{
    NvU32 i, index;
    size_t s;
    NvError error;
    void* p = NULL;

    /*
     * Initialize RM handle, which indicates enabled diagnostic mode
     */
    NV_ASSERT(hRmDevice);
    if (s_hDiagRm != NULL)
        return NvSuccess; // Already enabled and initialized
    s_hDiagRm = hRmDevice;

    /*
     * Fill in modules information, clear instance map, and allocate
     * module instance pointers table
     */
    if (hRmDevice->ChipId.Id == 0x20)
    {
        s_Modules.ModuleClockTableSize = g_Ap20ModuleClockTableSize;
        s_Modules.ModuleClockTable = g_Ap20ModuleClockTable;
        s_Rails.PowerRailsNum = s_Ap20PowerRailsTableSize;
        s_Rails.PowerRailsTable = s_Ap20PowerRailsTable;
    } else
    {
        // Any other chip is treated as AP15
        s_Modules.ModuleClockTableSize = g_Ap15ModuleClockTableSize;
        s_Modules.ModuleClockTable = g_Ap15ModuleClockTable;
        s_Rails.PowerRailsNum = s_Ap15PowerRailsTableSize;
        s_Rails.PowerRailsTable = s_Ap15PowerRailsTable;
    }

    s_Rails.PmuBusHostDiagId = NvRmDiagModuleID_Dvc; // Default for AP15

    NV_ASSERT(s_Modules.ModuleClockTableSize);

    NvOsMemset(s_Modules.InstancesMap, 0, sizeof(s_Modules.InstancesMap));
    s = sizeof(NvRmModuleClockInfo*) * s_Modules.ModuleClockTableSize;
    p = NvOsAlloc(s);
    if (!p)
    {
        error = NvError_InsufficientMemory;
        goto failed;
    }
    NvOsMemset(p, 0, s);
    s_Modules.pInstancePtrs = p;

    /*
     * Parse module clock/reset information table and fill in mapping arrays.
     * The table lists all valid (present) modules and only valid modules.
     */
    // 1st pass - count module instances
    for (i = 0; i < s_Modules.ModuleClockTableSize; i++)
    {
        NvRmDiagModuleID id = s_Modules.ModuleClockTable[i].DiagModuleID;
        NV_ASSERT((0 < id) && (id < NvRmDiagModuleID_Num));
        s_Modules.InstancesMap[id].InstancesNum++;
    }

    // 2nd pass - fill in mapping indexes (running base offset per module ID)
    for (index = 0, i = 0; i < NvRmDiagModuleID_Num; i++)
    {
        DiagModuleMapping* pMapping = &s_Modules.InstancesMap[i];
        if (pMapping->InstancesNum != 0)
        {
            pMapping->BaseIndex = index;
            index += pMapping->InstancesNum;
            NV_ASSERT(index <= s_Modules.ModuleClockTableSize);
        }
    }

    // 3rd pass - fill in instance pointers at [BaseIndex + instance]
    for (i = 0; i < s_Modules.ModuleClockTableSize; i++)
    {
        DiagModuleMapping* pMapping =
            &s_Modules.InstancesMap[s_Modules.ModuleClockTable[i].DiagModuleID];
        NvU32 instance = s_Modules.ModuleClockTable[i].Instance;
        index = pMapping->BaseIndex + instance;

        NV_ASSERT(instance < pMapping->InstancesNum);
        NV_ASSERT(s_Modules.pInstancePtrs[index] == NULL);

        s_Modules.pInstancePtrs[index] = &s_Modules.ModuleClockTable[i];
    }

    // Convert PMU Host diagnostic ID to common RM ID
    s_Rails.PmuBusHostRmId = MapDiagIdToRmId(s_Rails.PmuBusHostDiagId);

    /*
     * Parse clock sources information table and map clock source IDs
     * to handles. Count total available sources.
     */
    NvOsMemset(s_Sources.hSources, 0, sizeof(s_Sources.hSources));
    for (s_Sources.ClockSourcesNum = 0, i = 1; i < NvRmClockSource_Num; i++)
    {
        s_Sources.hSources[i] = NvRmPrivGetClockSourceHandle(i);
        if (s_Sources.hSources[i] != NULL)
            s_Sources.ClockSourcesNum++;
    }

    // Make sure DFS is not running
    NvRmDfsSetState(s_hDiagRm, NvRmDfsRunState_Stopped);
    return NvSuccess;

failed:
    // Roll everything back so a later NvRmDiagEnable() can retry cleanly
    NvOsFree(p);
    NvOsMemset(&s_Modules, 0, sizeof(s_Modules));
    NvOsMemset(&s_Sources, 0, sizeof(s_Sources));
    s_hDiagRm = NULL;
    return error;
}
+
+/*****************************************************************************/
+
+NvError
+NvRmDiagListModules(
+ NvU32* pListSize,
+ NvRmDiagModuleID* pIdList)
+{
+ NvU32 ModulesNum, i;
+
+ NV_ASSERT(pListSize);
+ NV_ASSERT(pIdList);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+ ModulesNum = s_Modules.ModuleClockTableSize;
+
+ // Return total number of modules if no room for the output list
+ if ((*pListSize) == 0)
+ {
+ *pListSize = ModulesNum;
+ return NvSuccess;
+ }
+
+ // Return modules list (min of requested and total size)
+ if ((*pListSize) > ModulesNum)
+ {
+ (*pListSize) = ModulesNum;
+ }
+ for (i = 0; i < (*pListSize); i++, pIdList++)
+ {
+ const NvRmModuleClockInfo* pCinfo = &s_Modules.ModuleClockTable[i];
+ *pIdList = NVRM_DIAG_MODULE(pCinfo->DiagModuleID, pCinfo->Instance);
+ }
+ return NvSuccess;
+}
+
+NvError
+NvRmDiagListClockSources(
+ NvU32* pListSize,
+ NvRmDiagClockSourceHandle* phSourceList)
+{
+ NvU32 SourcesNum, i;
+ NV_ASSERT(pListSize);
+ NV_ASSERT(phSourceList);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+
+ // Return total number of sources if no room for the output list
+ if ((*pListSize) == 0)
+ {
+ *pListSize = s_Sources.ClockSourcesNum;
+ return NvSuccess;
+ }
+
+ // Return sources list (min of requested and total size)
+ for (SourcesNum = 0, i = 0; i < NvRmClockSource_Num; i++)
+ {
+ NvRmDiagClockSourceHandle hSource = s_Sources.hSources[i];
+ if (hSource != NULL)
+ {
+ SourcesNum++;
+ *(phSourceList++) = hSource;
+ if (SourcesNum >= (*pListSize))
+ break;
+ }
+ }
+ *pListSize = SourcesNum;
+ return NvSuccess;
+}
+
+/*****************************************************************************/
+
/**
 * Lists the clock source handles that can feed the given module.
 * When *pListSize is 0 only the source count is reported; otherwise up to
 * *pListSize handles are written and *pListSize is set to the number found.
 *
 * @param id Combined diagnostic module ID and instance.
 */
NvError
NvRmDiagModuleListClockSources(
    NvRmDiagModuleID id,
    NvU32 * pListSize,
    NvRmDiagClockSourceHandle* phSourceList)
{
    NvU32 SourcesNum, i;
    const NvRmModuleClockInfo* pCinfo = NULL;
    NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(id);
    NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(id);

    NV_ASSERT(pListSize);
    NV_ASSERT(phSourceList);

    if (s_hDiagRm == NULL)
    {
        return NvError_NotInitialized;
    }

    // Verify module id and get module info
    NV_ASSERT((Module < NvRmDiagModuleID_Num) &&
        (Instance < s_Modules.InstancesMap[Module].InstancesNum));
    pCinfo =
        s_Modules.pInstancePtrs[s_Modules.InstancesMap[Module].BaseIndex + Instance];

    /*
     * Return total number of module sources if no room for the output list,
     * otherwise return module sources list (min of requested and total size)
     */
    for (SourcesNum = 0, i = 0; i < NvRmClockSource_Num; i++)
    {
        NvRmClockSource source = pCinfo->Sources[i];
        NV_ASSERT(source < NvRmClockSource_Num);
        if (source != NvRmClockSource_Invalid)
        {
            SourcesNum++;
            if ((*pListSize) != 0)
            {
                *phSourceList = s_Sources.hSources[source];
                NV_ASSERT(*phSourceList);
                phSourceList++;
                // Stop once the caller's buffer is full
                if (SourcesNum >= (*pListSize))
                    break;
            }
        }
    }
    *pListSize = SourcesNum;
    return NvSuccess;
}
+
+NvError
+NvRmDiagModuleClockEnable(
+ NvRmDiagModuleID id,
+ NvBool enable)
+{
+ NvU32 reg, offset;
+ const NvRmModuleClockInfo* pCinfo = NULL;
+ NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(id);
+ NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(id);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+
+ // Verify module id and get module info
+ NV_ASSERT((Module < NvRmDiagModuleID_Num) &&
+ (Instance < s_Modules.InstancesMap[Module].InstancesNum));
+ pCinfo =
+ s_Modules.pInstancePtrs[s_Modules.InstancesMap[Module].BaseIndex + Instance];
+
+ // Set/Clear clock control bit(s), if any
+ if (pCinfo->ClkEnableField != 0)
+ {
+ offset = pCinfo->ClkEnableOffset;
+ NV_ASSERT(offset);
+ reg = NV_REGR(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset);
+ reg = enable ?
+ (reg | pCinfo->ClkEnableField) : (reg & (~ pCinfo->ClkEnableField));
+ NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+ }
+ return NvSuccess;
+}
+
/**
 * Selects a clock source and divider setting for the given module.
 *
 * @param id        Combined diagnostic module ID and instance.
 * @param hSource   Handle of the source to select; must be one of the
 *                  module's listed sources.
 * @param divider   Divider setting in half-steps (bit 0 is the half-step,
 *                  shifted out for non-fractional dividers).
 * @param Source1st If true, the source field is written before the divider
 *                  field; otherwise the divider is written first.
 */
NvError
NvRmDiagModuleClockConfigure(
    NvRmDiagModuleID id,
    NvRmDiagClockSourceHandle hSource,
    NvU32 divider,
    NvBool Source1st)
{
    NvU32 reg, offset, SrcIndex;
    const NvRmModuleClockInfo* pCinfo = NULL;
    NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(id);
    NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(id);

    if (s_hDiagRm == NULL)
    {
        return NvError_NotInitialized;
    }

    // Verify source handle, module id, and get module info
    NV_ASSERT((hSource != NULL) &&
        (Module < NvRmDiagModuleID_Num) &&
        (Instance < s_Modules.InstancesMap[Module].InstancesNum));
    pCinfo =
        s_Modules.pInstancePtrs[s_Modules.InstancesMap[Module].BaseIndex + Instance];

    /*
     * Find source index for the specified module and source handle. If not
     * found report invalid handle. If module has fixed clock source and no
     * divider, return success.
     * NOTE(review): in release builds (asserts compiled out) an unlisted
     * source falls through with SrcIndex == NvRmClockSource_Num and is
     * written unmasked below — confirm callers only pass listed sources.
     */
    for (SrcIndex = 0; SrcIndex < NvRmClockSource_Num; SrcIndex++)
    {
        if (hSource->SourceId == pCinfo->Sources[SrcIndex])
            break;
    }
    NV_ASSERT(SrcIndex != NvRmClockSource_Num);
    if ((pCinfo->SourceFieldMask == 0) && (pCinfo->DivisorFieldMask == 0))
    {
        return NvSuccess;
    }
    NV_ASSERT(SrcIndex <= pCinfo->SourceFieldMask);

    /*
     * Adjust divider values: if module divider is not fractional, shift out
     * half step bit. In any case truncate high divider bits to fit module
     * divider field.
     */
    if (pCinfo->Divider != NvRmClockDivider_Fractional_2)
    {
        divider >>= 1;
    }
    divider &= pCinfo->DivisorFieldMask;

    /*
     * Update clock control register. The order of source and divider fields
     * update is specified by the caller. Insert delay between the updates.
     */
    offset = pCinfo->ClkSourceOffset;
    NV_ASSERT(offset);
    reg = NV_REGR(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset);
    if (Source1st)
    {
        reg &= (~(pCinfo->SourceFieldMask << pCinfo->SourceFieldShift));
        reg |= (SrcIndex << pCinfo->SourceFieldShift);
        NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }
    if (pCinfo->Divider != NvRmClockDivider_None)
    {
        reg &= (~(pCinfo->DivisorFieldMask << pCinfo->DivisorFieldShift));
        reg |= (divider << pCinfo->DivisorFieldShift);
        NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }
    if (!Source1st)
    {
        reg &= (~(pCinfo->SourceFieldMask << pCinfo->SourceFieldShift));
        reg |= (SrcIndex << pCinfo->SourceFieldShift);
        NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
        NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
    }
    return NvSuccess;
}
+
+NvError
+NvRmDiagModuleReset(
+ NvRmDiagModuleID id,
+ NvBool KeepAsserted)
+{
+ NvU32 reg, offset;
+ const NvRmModuleClockInfo* pCinfo = NULL;
+ NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(id);
+ NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(id);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+
+ // Verify module id and get module info
+ NV_ASSERT((Module < NvRmDiagModuleID_Num) &&
+ (Instance < s_Modules.InstancesMap[Module].InstancesNum));
+ pCinfo =
+ s_Modules.pInstancePtrs[s_Modules.InstancesMap[Module].BaseIndex + Instance];
+
+ /*
+ * Assert reset bit and keep it asserted if requested by the caller.
+ * Otherwise de-assert reset after the delay.
+ */
+ offset = pCinfo->ClkResetOffset;
+ NV_ASSERT(offset);
+ reg = NV_REGR(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset);
+ reg |= pCinfo->ClkResetField;
+ NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+ if (!KeepAsserted)
+ {
+ NvOsWaitUS(NVRM_RESET_DELAY);
+ reg &= (~(pCinfo->ClkResetField));
+ NV_REGW(s_hDiagRm, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+ }
+ return NvSuccess;
+}
+
+/*****************************************************************************/
+
+NvU64 NvRmDiagClockSourceGetName(
+ NvRmDiagClockSourceHandle hSource)
+{
+ if ((s_hDiagRm == NULL) ||
+ (hSource == NULL) ||
+ (hSource->SourceId == NvRmClockSource_Invalid) ||
+ (hSource->SourceId >= NvRmClockSource_Num))
+ {
+ return 0;
+ }
+ return s_ApClockSourceNames[hSource->SourceId];
+}
+
+NvRmDiagClockSourceType
+NvRmDiagClockSourceGetType(NvRmDiagClockSourceHandle hSource)
+{
+ if ((s_hDiagRm == NULL) || (hSource == NULL))
+ {
+ return 0;
+ }
+ // Map RM source types to diagnostic source types
+ switch (hSource->SourceType)
+ {
+ case NvRmClockSourceType_Fixed:
+ return NvRmDiagClockSourceType_Oscillator;
+ case NvRmClockSourceType_Pll:
+ return NvRmDiagClockSourceType_Pll;
+ case NvRmClockSourceType_Divider:
+ case NvRmClockSourceType_Core:
+ case NvRmClockSourceType_Selector:
+ return NvRmDiagClockSourceType_Scaler;
+ default:
+ NV_ASSERT(!"Invalid source type");
+ return 0;
+ }
+}
+
// TODO: does diagnostic scripts really need these details on scaler types?
/**
 * Translates the RM divider type of the given source handle to the
 * diagnostic scaler type; returns 0 when diagnostic mode is off, the
 * handle is NULL, or the type is unknown.
 */
NvRmDiagClockScalerType
NvRmDiagClockSourceGetScaler(NvRmDiagClockSourceHandle hSource)
{
    if ((s_hDiagRm == NULL) || (hSource == NULL))
    {
        return 0;
    }
    // Map RM divider types to diagnostic scaler types
    switch (hSource->SourceType)
    {
        // Fixed sources and PLLs have no scaler
        case NvRmClockSourceType_Fixed:
        case NvRmClockSourceType_Pll:
            return NvRmDiagClockScalerType_NoScaler;
        case NvRmClockSourceType_Divider:
            // Divider sources: discriminate on the concrete divider kind
            switch (hSource->pInfo.pDivider->Divider)
            {
                case NvRmClockDivider_Keeper16:
                case NvRmClockDivider_Skipper16:
                    return NvRmDiagClockScalerType_Divider_M_16;
                case NvRmClockDivider_Fractional_2:
                case NvRmClockDivider_Integer_1:
                case NvRmClockDivider_Integer:
                    return NvRmDiagClockScalerType_Divider_1_N;
                default:
                    NV_ASSERT(!"Invalid divider type");
                    return 0;
            }
        case NvRmClockSourceType_Core:
            return NvRmDiagClockScalerType_Divider_M_N;
        case NvRmClockSourceType_Selector:
            return NvRmDiagClockScalerType_Doubler;
        default:
            NV_ASSERT(!"Invalid source type");
            return 0;
    }
}
+
+NvError
+NvRmDiagClockSourceListSources(
+ NvRmDiagClockSourceHandle hSource,
+ NvU32* pListSize,
+ NvRmDiagClockSourceHandle * phSourceList)
+{
+ NvRmClockSource source = NvRmClockSource_Invalid;
+ NvRmClockSource* Sources = NULL;
+
+ NV_ASSERT(pListSize);
+ NV_ASSERT(phSourceList);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+ NV_ASSERT((hSource != NULL) &&
+ (hSource->SourceId != NvRmClockSource_Invalid) &&
+ (hSource->SourceId < NvRmClockSource_Num));
+
+ switch (hSource->SourceType)
+ {
+ // Get input clock ID for single-input clock sources;
+ // (may be invalid for primary sources)
+ case NvRmClockSourceType_Fixed:
+ source = hSource->pInfo.pFixed->InputId;
+ break;
+ case NvRmClockSourceType_Pll:
+ source = hSource->pInfo.pPll->InputId;
+ break;
+ case NvRmClockSourceType_Divider:
+ source = hSource->pInfo.pDivider->InputId;
+ break;
+ // Get pointer to the source array for core and selector sources
+ // (must be valid)
+ case NvRmClockSourceType_Core:
+ Sources = hSource->pInfo.pCore->Sources;
+ NV_ASSERT(Sources);
+ break;
+ case NvRmClockSourceType_Selector:
+ Sources = hSource->pInfo.pSelector->Sources;
+ NV_ASSERT(Sources);
+ break;
+ default:
+ NV_ASSERT(!"Invalid source type");
+ }
+ if (Sources != NULL)
+ {
+ // Return total number of input sources if no room for the output list,
+ // otherwise return sources list (min of requested and total size)
+ NvU32 SourcesNum, i;
+ for (SourcesNum = 0, i = 0; i < NvRmClockSource_Num; i++)
+ {
+ NvRmClockSource source = Sources[i];
+ NV_ASSERT(source < NvRmClockSource_Num);
+ if (source != NvRmClockSource_Invalid)
+ {
+ SourcesNum++;
+ if ((*pListSize) != 0)
+ {
+ *phSourceList = s_Sources.hSources[source];
+ NV_ASSERT(*phSourceList);
+ phSourceList++;
+ if (SourcesNum >= (*pListSize))
+ break;
+ }
+ }
+ }
+ *pListSize = SourcesNum;
+ }
+ else if (source != NvRmClockSource_Invalid)
+ {
+ //Only one input source is available. Return the resepctive handle
+ // if requested.
+ NV_ASSERT(source < NvRmClockSource_Num);
+ if ((*pListSize) != 0)
+ *phSourceList = s_Sources.hSources[source];
+ *pListSize = 1;
+ }
+ else
+ {
+ // Primary source (e.g., oscillator). No (= zero) input sources.
+ *pListSize = 0;
+ }
+ return NvSuccess;
+}
+
+/*****************************************************************************/
+
+NvU32 NvRmDiagOscillatorGetFreq(NvRmDiagClockSourceHandle hOscillator)
+{
+
+ if ((s_hDiagRm == NULL) || (hOscillator == NULL) ||
+ (hOscillator->SourceId == NvRmClockSource_Invalid) ||
+ (hOscillator->SourceType != NvRmClockSourceType_Fixed))
+ {
+ return 0;
+ }
+ return NvRmPrivGetClockSourceFreq(hOscillator->SourceId);
+}
+
/**
 * Programs the PLL behind hPll with the given M (input divider),
 * N (feedback multiplier) and P (post divider) values.
 */
NvError
NvRmDiagPllConfigure(
    NvRmDiagClockSourceHandle hPll,
    NvU32 M,
    NvU32 N,
    NvU32 P)
{
    if (s_hDiagRm == NULL)
    {
        return NvError_NotInitialized;
    }
    NV_ASSERT((hPll != NULL) &&
        (hPll->SourceId != NvRmClockSource_Invalid) &&
        (hPll->SourceType == NvRmClockSourceType_Pll));

    // (NvU32)-1 presumably means "keep the current setting" for the 4th
    // divider argument — confirm against NvRmPrivAp15PllSet.
    NvRmPrivAp15PllSet(s_hDiagRm, hPll->pInfo.pPll, M, N, P, (NvU32)-1,
        0, 0, NV_TRUE, NvRmPllConfigFlags_Override);
    return NvSuccess;
}
+
+NvError
+NvRmDiagClockScalerConfigure(
+ NvRmDiagClockSourceHandle hScaler,
+ NvRmDiagClockSourceHandle hInput,
+ NvU32 M,
+ NvU32 N)
+{
+ NvU32 setting = 0;
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+ NV_ASSERT(hScaler != NULL);
+
+ switch (hScaler->SourceType)
+ {
+ case NvRmClockSourceType_Divider:
+ switch (hScaler->pInfo.pDivider->Divider)
+ {
+ case NvRmClockDivider_Keeper16:
+ setting = M >> 1;
+ break;
+ case NvRmClockDivider_Skipper16:
+ setting = (~(M >> 1));
+ break;
+ case NvRmClockDivider_Fractional_2:
+ setting = N;
+ break;
+ case NvRmClockDivider_Integer_1:
+ case NvRmClockDivider_Integer:
+ setting = N >> 1;
+ break;
+ default:
+ NV_ASSERT(!"Invalid divider type");
+ }
+ NvRmPrivDividerSet(s_hDiagRm, hScaler->pInfo.pDivider, setting);
+ return NvSuccess;
+
+ case NvRmClockSourceType_Core:
+ NvRmPrivCoreClockSet(s_hDiagRm, hScaler->pInfo.pCore,
+ hInput->SourceId, (M >> 1), (N >> 1));
+ break;
+ case NvRmClockSourceType_Selector:
+ NvRmPrivSelectorClockSet(s_hDiagRm, hScaler->pInfo.pSelector,
+ hInput->SourceId, (M != 0));
+ break;
+ case NvRmClockSourceType_Pll:
+ case NvRmClockSourceType_Fixed:
+ NV_ASSERT(!" Diag Clock Scaler Config: illegal clock source. ");
+ break;
+ default:
+ NV_ASSERT(!"Invalid source type");
+ break;
+ }
+ return NvSuccess;
+}
+
+/*****************************************************************************/
+
/*
 * Gets power group for the specified module if it is one of system modules, not
 * present in the relocation table. Otherwise, returns NV_POWERGROUP_INVALID.
 */
static NvU32
DiagGetSystemModulePowerGroup(const NvRmModuleClockInfo* pCinfo);

static NvU32
DiagGetSystemModulePowerGroup(const NvRmModuleClockInfo* pCinfo)
{
    NvU32 PowerGroup = NV_POWERGROUP_INVALID;
    switch (pCinfo->Module)
    {
        case NvRmModuleID_CacheMemCtrl:
            if (pCinfo->Instance == 0)
                break; // CPU cache controller is present
            // fall through if AVP cache controller (instance != 0):
            // it belongs to the non-power-gated group like the cases below
        case NvRmModuleID_Invalid:
        case NvRmPrivModuleID_System:
        case NvRmModuleID_Avp:
            PowerGroup = NV_POWERGROUP_NPG;
            break;
        case NvRmModuleID_Cpu:
            PowerGroup = NV_POWERGROUP_CPU;
            break;
        default:
            // Regular module: caller resolves it via the relocation table
            break;
    }
    return PowerGroup;
}
+
+NvError
+NvRmDiagListPowerRails(
+ NvU32* pListSize,
+ NvRmDiagPowerRailHandle* phRailList)
+{
+ NvU32 RailsNum, i;
+
+ NV_ASSERT(pListSize);
+ NV_ASSERT(phRailList);
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+ RailsNum = s_Rails.PowerRailsNum;
+
+ // Return total number of rails if no room for the output list
+ if ((*pListSize) == 0)
+ {
+ *pListSize = RailsNum;
+ return NvSuccess;
+ }
+
+ // Return rails list (min of requested and total size)
+ if ((*pListSize) > RailsNum)
+ {
+ (*pListSize) = RailsNum;
+ }
+ for (i = 0; i < (*pListSize); i++, phRailList++)
+ {
+ *phRailList = (NvRmDiagPowerRailHandle)&s_Rails.PowerRailsTable[i];
+ }
+ return NvSuccess;
+}
+
+NvU64
+NvRmDiagPowerRailGetName(NvRmDiagPowerRailHandle hRail)
+{
+ if ((s_hDiagRm == NULL) || (hRail == NULL))
+ {
+ return 0;
+ }
+ return hRail->PowerRailId;
+}
+
/**
 * Returns the power rail handle feeding the given module.
 * Exactly one rail per module: when *pListSize is 0 only the count (1) is
 * reported; otherwise the single handle is stored and *pListSize set to 1.
 *
 * @param id Combined diagnostic module ID and instance.
 */
NvError
NvRmDiagModuleListPowerRails(
    NvRmDiagModuleID id,
    NvU32* pListSize,
    NvRmDiagPowerRailHandle* phRailList)
{

    NvU32 ModulePowerGroup, i;
    const NvRmDiagPowerRail* pRail = NULL;
    const NvRmModuleClockInfo* pCinfo = NULL;
    NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(id);
    NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(id);

    NV_ASSERT(pListSize);
    NV_ASSERT(phRailList);

    if (s_hDiagRm == NULL)
    {
        return NvError_NotInitialized;
    }

    // Verify module id
    NV_ASSERT((Module < NvRmDiagModuleID_Num) &&
        (Instance < s_Modules.InstancesMap[Module].InstancesNum));

    // One rail per module; just return if no room to return handle
    if ((*pListSize) == 0)
    {
        *pListSize = 1;
        return NvSuccess;
    }

    // Get module power group: system modules are resolved directly,
    // everything else comes from its relocation table instance record
    pCinfo =
        s_Modules.pInstancePtrs[s_Modules.InstancesMap[Module].BaseIndex + Instance];
    ModulePowerGroup = DiagGetSystemModulePowerGroup(pCinfo);
    if (ModulePowerGroup == NV_POWERGROUP_INVALID)
    {
        NvRmModuleInstance* pInst = NULL;
        NV_ASSERT_SUCCESS(NvRmPrivGetModuleInstance(
            s_hDiagRm, NVRM_MODULE_ID(pCinfo->Module, pCinfo->Instance), &pInst));
        ModulePowerGroup = pInst->DevPowerGroup;
    }
    NV_ASSERT(ModulePowerGroup != NV_POWERGROUP_INVALID);

    // Find the power rail for the group (each rail's group list is
    // terminated by NV_POWERGROUP_INVALID)
    for (i = 0; i < s_Rails.PowerRailsNum; i++)
    {
        const NvU32* pPowerGroup = s_Rails.PowerRailsTable[i].PowerRailGroups;
        while ((*pPowerGroup) != NV_POWERGROUP_INVALID)
        {
            if ((*pPowerGroup) == ModulePowerGroup)
            {
                pRail = &s_Rails.PowerRailsTable[i];
                break;
            }
            pPowerGroup++;
            // Guard against a group list missing its terminator
            NV_ASSERT(pPowerGroup < (s_Rails.PowerRailsTable[i].PowerRailGroups +
                NV_ARRAY_SIZE(s_Rails.PowerRailsTable[i].PowerRailGroups)));
        }
        if (pRail)
            break;
    }

    // Return power rail found
    NV_ASSERT(pRail);
    *phRailList = (NvRmDiagPowerRailHandle)pRail;
    *pListSize = 1;
    return NvSuccess;
}
+
+NvError
+NvRmDiagConfigurePowerRail(
+ NvRmDiagPowerRailHandle hRail,
+ NvU32 VoltageMV)
+{
+ NvU32 TimeUs = 0;
+ NvU32 RailAddress = 0;
+ const NvOdmPeripheralConnectivity* pPmuRail = NULL;
+
+ if (s_hDiagRm == NULL)
+ {
+ return NvError_NotInitialized;
+ }
+
+ // Verify that targeted rail can be found on the board, and
+ // it is connected to PMU
+ if (hRail != NULL)
+ {
+ pPmuRail = NvOdmPeripheralGetGuid(hRail->PowerRailId);
+ }
+ if((pPmuRail == NULL) || (pPmuRail->NumAddress == 0))
+ {
+ NV_ASSERT(!"Invalid power rail");
+ return NvError_NotSupported;
+ }
+
+ // Change voltage, and wait for settling time.
+ RailAddress = pPmuRail->AddressList[0].Address;
+ NVRM_DIAG_PRINTF(("Setting PMU rail %2d to %5dmV\n", RailAddress, VoltageMV));
+ if (NvRmPrivDiagPmuSetVoltage(s_hDiagRm, RailAddress, VoltageMV, &TimeUs))
+ {
+ NvOsWaitUS(TimeUs);
+ return NvSuccess;
+ }
+ return NvError_Busy;
+}
+
+/*****************************************************************************/
+
+static NvRmModuleID
+MapDiagIdToRmId(NvRmDiagModuleID DiagId)
+{
+ const NvRmModuleClockInfo* pCinfo = NULL;
+ NvU32 Instance = NVRM_DIAG_MODULE_INSTANCE(DiagId);
+ NvRmDiagModuleID Module = NVRM_DIAG_MODULE_ID(DiagId);
+
+ NvRmModuleID RmId = NvRmModuleID_Invalid;
+ if ((Module < NvRmDiagModuleID_Num) &&
+ (Instance < s_Modules.InstancesMap[Module].InstancesNum))
+ {
+ pCinfo = s_Modules.pInstancePtrs[
+ s_Modules.InstancesMap[Module].BaseIndex + Instance];
+ RmId = NVRM_MODULE_ID(pCinfo->Module, pCinfo->Instance);
+ }
+ return RmId;
+}
+
+NvBool NvRmPrivIsDiagMode(NvRmModuleID ModuleId)
+{
+ if (s_hDiagRm == NULL)
+ return NV_FALSE; // Report no diagnostic in progress
+
+ if (ModuleId == NvRmModuleID_Invalid)
+ return NV_TRUE; // Report diagnostic is in progress
+
+ // Report diagnostic is in progress for any module except PMU bus host
+ return (ModuleId != s_Rails.PmuBusHostRmId);
+}
+
/**
 * Reports whether diagnostic locking is supported in this build.
 * Compile-time constant selected by the NVRM_DIAG_LOCK_SUPPORTED macro.
 */
NvBool NvRmDiagIsLockSupported(void)
{
#if NVRM_DIAG_LOCK_SUPPORTED
    return NV_TRUE;
#else
    return NV_FALSE;
#endif
}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/Makefile b/arch/arm/mach-tegra/nvrm/core/ap20/Makefile
new file mode 100644
index 000000000000..4fa16d6a6426
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/Makefile
@@ -0,0 +1,18 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += ap20rm_reloctable.o
+obj-y += ap20rm_clocks.o
+obj-y += ap20rm_clock_config.o
+obj-y += ap20rm_memctrl.o
+obj-y += ap20rm_power_dfs.o
+obj-y += ap20rm_pinmux_tables.o
+obj-y += ap20rm_fuse.o
+obj-y += ap20rm_clocks_info.o
+obj-y += ap20rm_gart.o
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
new file mode 100644
index 000000000000..df447979a883
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
@@ -0,0 +1,1546 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvrm_clocks.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_drf.h"
+#include "ap20rm_clocks.h"
+#include "ap20/aremc.h"
+#include "ap20/arclk_rst.h"
+#include "ap20/arapbpm.h"
+#include "ap15/ap15rm_private.h"
+#include "nvodm_query.h"
+
+// TODO: do we need CPU/EMC ratio policy?
+#define NVRM_CPU_EMC_SW_RATIO (0)
+
+// Default CPU power good delay
+#define NVRM_DEFAULT_CPU_PWRGOOD_US (2000)
+
+// Default PMU accuracy %
+#define NVRM_DEFAULT_PMU_ACCURACY_PCT (3)
+
+// Minimum core over CPU voltage margin (at SoC)
+#define NV_AP20_CORE_OVER_CPU_MV (100)
+
+/*****************************************************************************/
+
+/*
+ * TODO: Basic DFS clock control policy outline:
+ */
+
+// Limit frequencies ratio for AHB : System >= 1:2 and APB : System >= 1 : 4
+#define LIMIT_SYS_TO_AHB_APB_RATIOS (1)
+
+// PLLP2 must be used as a variable low frequency source for System clock.
+// Each policy entry records: the source ID, the frequency actually reachable
+// for the requested KHz (PLLP output divided down by an integer ratio), and
+// the fractional divider setting producing it. The divider setting follows
+// the fractional-divider rule out = (2 * src) / (setting + 2), hence
+// setting = (2 * NVRM_PLLP_FIXED_FREQ_KHZ) / KHz - 2.
+#define PLLP_POLICY_ENTRY(KHz) \
+ { NvRmClockSource_PllP2,\
+ (NVRM_PLLP_FIXED_FREQ_KHZ * 2)/((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz),\
+ ((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz - 2)\
+ },
+static const NvRmDfsSource s_Ap20PllPSystemClockPolicy[] =
+{
+ NVRM_AP20_PLLP_POLICY_SYSTEM_CLOCK
+};
+static const NvU32 s_Ap20PllPSystemClockPolicyEntries =
+ NV_ARRAY_SIZE(s_Ap20PllPSystemClockPolicy);
+#undef PLLP_POLICY_ENTRY
+
+
+// PLLP4 must be used as a variable low frequency source for cpu clock.
+// Same entry layout and divider math as the System clock policy above,
+// but sourced from the PLLP4 secondary divider.
+#define PLLP_POLICY_ENTRY(KHz) \
+ { NvRmClockSource_PllP4,\
+ (NVRM_PLLP_FIXED_FREQ_KHZ * 2)/((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz),\
+ ((NVRM_PLLP_FIXED_FREQ_KHZ * 2)/KHz - 2)\
+ },
+static const NvRmDfsSource s_Ap20PllPCpuClockPolicy[] =
+{
+ NVRM_AP20_PLLP_POLICY_CPU_CLOCK
+};
+static const NvU32 s_Ap20PllPCpuClockPolicyEntries =
+ NV_ARRAY_SIZE(s_Ap20PllPCpuClockPolicy);
+#undef PLLP_POLICY_ENTRY
+
+// EMC timing registers
+// Addresses of the EMC timing registers written on every EMC frequency
+// switch. Entry order must match the order of EmcTimingParameters[] in the
+// ODM configuration table (rev 2.0) — Ap20EmcTimingSet() pairs them by
+// index.
+static const NvU32 s_EmcTimingRegAddrRev20[] =
+{
+ EMC_RC_0, /* RC */
+ EMC_RFC_0, /* RFC */
+ EMC_RAS_0, /* RAS */
+ EMC_RP_0, /* RP */
+ EMC_R2W_0, /* R2W */
+ EMC_W2R_0, /* W2R */
+ EMC_R2P_0, /* R2P */
+ EMC_W2P_0, /* W2P */
+ EMC_RD_RCD_0, /* RD_RCD */
+ EMC_WR_RCD_0, /* WR_RCD */
+ EMC_RRD_0, /* RRD */
+ EMC_REXT_0, /* REXT */
+ EMC_WDV_0, /* WDV */
+ EMC_QUSE_0, /* QUSE */
+ EMC_QRST_0, /* QRST */
+ EMC_QSAFE_0, /* QSAFE */
+ EMC_RDV_0, /* RDV */
+ EMC_REFRESH_0, /* REFRESH */
+ EMC_BURST_REFRESH_NUM_0, /* BURST_REFRESH_NUM */
+ EMC_PDEX2WR_0, /* PDEX2WR */
+ EMC_PDEX2RD_0, /* PDEX2RD */
+ EMC_PCHG2PDEN_0, /* PCHG2PDEN */
+ EMC_ACT2PDEN_0, /* ACT2PDEN */
+ EMC_AR2PDEN_0, /* AR2PDEN */
+ EMC_RW2PDEN_0, /* RW2PDEN */
+ EMC_TXSR_0, /* TXSR */
+ EMC_TCKE_0, /* TCKE */
+ EMC_TFAW_0, /* TFAW */
+ EMC_TRPAB_0, /* TRPAB */
+ EMC_TCLKSTABLE_0, /* TCLKSTABLE */
+ EMC_TCLKSTOP_0, /* TCLKSTOP */
+ EMC_TREFBW_0, /* TREFBW */
+ EMC_QUSE_EXTRA_0, /* QUSE_EXTRA */
+ EMC_FBIO_CFG6_0, /* FBIO_CFG6 */
+ EMC_ODT_WRITE_0, /* ODT_WRITE */
+ EMC_ODT_READ_0, /* ODT_READ */
+ EMC_FBIO_CFG5_0, /* FBIO_CFG5 */
+ EMC_CFG_DIG_DLL_0, /* CFG_DIG_DLL */
+ EMC_DLL_XFORM_DQS_0, /* DLL_XFORM_DQS */
+ EMC_DLL_XFORM_QUSE_0, /* DLL_XFORM_QUSE */
+ EMC_ZCAL_REF_CNT_0, /* ZCAL_REF_CNT */
+ EMC_ZCAL_WAIT_CNT_0, /* ZCAL_WAIT_CNT */
+ EMC_AUTO_CAL_INTERVAL_0, /* AUTO_CAL_INTERVAL */
+ EMC_CFG_CLKTRIM_0_0, /* CFG_CLKTRIM_0 */
+ EMC_CFG_CLKTRIM_1_0, /* CFG_CLKTRIM_1 */
+ EMC_CFG_CLKTRIM_2_0, /* CFG_CLKTRIM_2 */
+};
+
+// Sorted list of timing parameters for discrete set of EMC frequencies used
+// by DFS; entry 0 specifies timing parameters for PLLM0 output frequency.
+// Entries are filled (and validated) by Ap20EmcConfigInit(); an entry with
+// Emc2xKHz == 0 is invalid.
+static NvRmAp20EmcTimingConfig
+s_Ap20EmcConfigSortedTable[NVRM_AP20_DFS_EMC_FREQ_STEPS];
+
+// Shadow of the current EMC clock configuration.
+static struct Ap20EmcConfigRec
+{
+ // Index of selected EMC configuration entry
+ // (NVRM_AP20_DFS_EMC_FREQ_STEPS marks "no entry selected")
+ NvU32 Index;
+
+ // Status of undivided PLLM0 path
+ NvBool UdPllM0;
+
+ // Pointers to EMC clock state
+ NvRmModuleClockState* pEmc2xState;
+
+ // Pointers to EMC clock descriptors
+ NvRmModuleClockInfo* pEmcInfo;
+
+ // Array of EMC timing registers
+ const NvU32* pEmcTimingReg;
+
+ // Total number of EMC timing registers
+ NvU32 EmcTimingRegNum;
+
+} s_Ap20EmcConfig = {0};
+
+// Shadow of the current VDE (video decoder) clock configuration.
+static struct Ap20VdeConfigRec
+{
+ // Pointer to VDE clock descriptor
+ NvRmModuleClockInfo* pVdeInfo;
+
+ // Pointer to VDE clock state
+ NvRmModuleClockState* pVdeState;
+
+} s_Ap20VdeConfig = {0};
+
+// CPU clock scaling parameters.
+static struct Ap20CpuConfigRec
+{
+ // Number of PLLX frequency steps
+ NvU32 PllXStepsNo;
+
+ // PLLX frequency steps table pointer
+ const NvRmFreqKHz* pPllXStepsKHz;
+
+ // CPU power good delay in microseconds
+ NvU32 CpuPowerGoodUs;
+
+ // Core over CPU voltage dependency parameters:
+ // Vcore >= CoreOverCpuSlope * Vcpu + CoreOverCpuOffset
+ NvU32 CoreOverCpuOffset;
+ NvU32 CoreOverCpuSlope;
+
+} s_Ap20CpuConfig = {0};
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+// Read the EMC 2x clock source/divider selection from the clock-and-reset
+// register and refresh the shadow clock state: SourceClock, Divider, the
+// undivided-PLLM0 flag, and the derived actual frequency.
+static void
+Ap20Emc2xClockStateUpdate(
+ NvRmDeviceHandle hRmDevice)
+{
+ NvU32 reg;
+ NvRmFreqKHz SourceClockFreq;
+ NvRmModuleClockInfo* pCinfo = s_Ap20EmcConfig.pEmcInfo;
+ NvRmModuleClockState* pCstate = s_Ap20EmcConfig.pEmc2xState;
+
+ NV_ASSERT(pCinfo && pCstate);
+
+ // Determine EMC2x source and divider setting; update EMC2x clock state
+ reg = NV_REGR(hRmDevice,
+ NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkSourceOffset);
+ pCstate->Divider =
+ ((reg >> pCinfo->DivisorFieldShift) & pCinfo->DivisorFieldMask);
+ pCstate->SourceClock =
+ ((reg >> pCinfo->SourceFieldShift) & pCinfo->SourceFieldMask);
+ s_Ap20EmcConfig.UdPllM0 = NV_DRF_VAL(CLK_RST_CONTROLLER,
+ CLK_SOURCE_EMC, USE_PLLM_UD, reg) ? NV_TRUE : NV_FALSE;
+ if (s_Ap20EmcConfig.UdPllM0)
+ {
+ NV_ASSERT( // policy: Src/Div settings must be synced with UD path
+ (pCstate->Divider == 0) && (pCstate->SourceClock ==
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLM_OUT0));
+ }
+ SourceClockFreq =
+ NvRmPrivGetClockSourceFreq(pCinfo->Sources[pCstate->SourceClock]);
+
+ // Fractional divider output = (Source Frequency * 2) / (divider + 2)
+ pCstate->actual_freq = ((SourceClockFreq << 1) / (pCstate->Divider + 2));
+}
+
+// Configure the EMC clock-change handshake (EMC_CFG_2) according to the
+// DRAM type read from EMC_FBIO_CFG5: DDR1/LPDDR2 use power-down during the
+// clock change, DDR2 uses self-refresh. Enables the clock-change request.
+// Returns NV_FALSE (and asserts) for an unsupported DRAM type, leaving
+// EMC_CFG_2 unmodified.
+static NvBool
+Ap20EmcClkChangeConfig(
+ NvRmDeviceHandle hRmDevice)
+{
+ NvU32 cfg2, cfg5;
+
+ cfg2 = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0);
+ cfg5 = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_FBIO_CFG5_0);
+
+ switch (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_TYPE, cfg5))
+ {
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR1:
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_LPDDR2:
+ cfg2 = NV_FLD_SET_DRF_DEF(
+ EMC, CFG_2, CLKCHANGE_PD_ENABLE, ENABLED, cfg2);
+ cfg2 = NV_FLD_SET_DRF_DEF(
+ EMC, CFG_2, CLKCHANGE_SR_ENABLE, DISABLED, cfg2);
+ break;
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR2:
+ cfg2 = NV_FLD_SET_DRF_DEF(
+ EMC, CFG_2, CLKCHANGE_PD_ENABLE, DISABLED, cfg2);
+ cfg2 = NV_FLD_SET_DRF_DEF(
+ EMC, CFG_2, CLKCHANGE_SR_ENABLE, ENABLED, cfg2);
+ break;
+ default:
+ NV_ASSERT(!"Not supported DRAM type");
+ return NV_FALSE;
+ }
+ cfg2 = NV_FLD_SET_DRF_DEF(
+ EMC, CFG_2, CLKCHANGE_REQ_ENABLE, ENABLED, cfg2);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0, cfg2);
+ return NV_TRUE;
+}
+
+// Initialize the EMC DFS configuration table.
+// Reads the current EMC clock state from h/w, configures the clock-change
+// mechanism, validates the ODM SDRAM configuration table (revision 2.0,
+// AP20 A02+, PLLM0 boot source), then builds s_Ap20EmcConfigSortedTable:
+// a descending-frequency list of EMC targets evenly divided down from
+// PLLM0 and then PLLP0, each matched (within 0.4%) to an ODM entry.
+// Any validation failure returns early, leaving the table invalidated
+// (all-zero), which disables EMC frequency scaling.
+static void Ap20EmcConfigInit(NvRmDeviceHandle hRmDevice)
+{
+ NvRmFreqKHz Emc2xKHz, SourceKHz;
+ NvU32 i, j, k, Source, ConfigurationsCount, UndividedIndex;
+ NvU32 Revision = 0;
+
+ NvRmFreqKHz PllM0KHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+ NvRmFreqKHz PllP0KHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0);
+ const NvRmModuleClockLimits* pEmcClockLimits =
+ NvRmPrivGetSocClockLimits(NvRmPrivModuleID_ExternalMemoryController);
+ const NvOdmSdramControllerConfigAdv* pEmcConfigurations =
+ NvOdmQuerySdramControllerConfigGet(&ConfigurationsCount, &Revision);
+
+ // Init memory configuration structure
+ NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
+ hRmDevice, NvRmPrivModuleID_ExternalMemoryController,
+ &s_Ap20EmcConfig.pEmcInfo, &s_Ap20EmcConfig.pEmc2xState));
+
+ s_Ap20EmcConfig.pEmcTimingReg = &s_EmcTimingRegAddrRev20[0];
+ s_Ap20EmcConfig.EmcTimingRegNum = NV_ARRAY_SIZE(s_EmcTimingRegAddrRev20);
+ s_Ap20EmcConfig.Index = NVRM_AP20_DFS_EMC_FREQ_STEPS; // invalid index
+
+ // Clean table, which invalidates PLLM0 entry - no EMC DFS if exits
+ // before sorting below
+ NvOsMemset(s_Ap20EmcConfigSortedTable, 0,
+ sizeof(s_Ap20EmcConfigSortedTable));
+
+ // Get EMC2x clock state from h/w
+ Ap20Emc2xClockStateUpdate(hRmDevice);
+
+ // Configure EMC clock change mechanism - exit if not supported
+ if (!Ap20EmcClkChangeConfig(hRmDevice))
+ return;
+
+ // Check if configuration table is provided by ODM
+ if ((ConfigurationsCount == 0) || (pEmcConfigurations == NULL))
+ return;
+
+ // EMC DVFS is supported on AP20 starting with A02 chip
+ if ((hRmDevice->ChipId.Major == 1) && (hRmDevice->ChipId.Minor <= 1))
+ return;
+
+ // Only 2.0 table revision is supported
+ if (Revision != 0x20)
+ {
+ NV_ASSERT(!"Invalid configuration table revision");
+ return;
+ }
+
+ // Check PLLs clock range
+ if ((PllP0KHz < pEmcClockLimits->MinKHz) ||
+ (PllP0KHz > pEmcClockLimits->MaxKHz))
+ {
+ NV_ASSERT(!"PLLP0 is outside supported EMC range");
+ return;
+ }
+ if ((PllM0KHz < pEmcClockLimits->MinKHz) ||
+ (PllM0KHz > pEmcClockLimits->MaxKHz))
+ {
+ NV_ASSERT(!"PLLM0 is outside supported EMC range");
+ return;
+ }
+
+ // Check if PLLM0 is configured by boot loader as EMC clock source
+ if (s_Ap20EmcConfig.pEmc2xState->SourceClock !=
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLM_OUT0)
+ {
+ NV_ASSERT(!"Other than PLLM0 clock source is used for EMC");
+ return;
+ }
+
+ // Sort list of EMC timing parameters in descending order of frequencies
+ // evenly divided down from the selected source. Supported sources: PLLM0
+ // PLLP0, and Oscillator
+ Source = CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLM_OUT0;
+ for (i = 0; i < NVRM_AP20_DFS_EMC_FREQ_STEPS; )
+ {
+ SourceKHz = Emc2xKHz = NvRmPrivGetClockSourceFreq(
+ s_Ap20EmcConfig.pEmcInfo->Sources[Source]);
+
+ // Inner loop: walk divided-down frequencies of the current source
+ // (k = 0 is the undivided source output) and match each against the
+ // ODM configuration table.
+ for (k = 0, UndividedIndex = i; i < NVRM_AP20_DFS_EMC_FREQ_STEPS; )
+ {
+ s_Ap20EmcConfigSortedTable[i].Emc2xKHz = 0; // mark entry invalid
+ for (j = 0; j < ConfigurationsCount; j++)
+ {
+ // Find match with 0.4% accuracy for ODM configuration
+ if (((pEmcConfigurations[j].SdramKHz * 2) <=
+ (Emc2xKHz + (Emc2xKHz >> 8))) &&
+ ((pEmcConfigurations[j].SdramKHz * 2) >=
+ (Emc2xKHz - (Emc2xKHz >> 8))))
+ {
+ NV_ASSERT(pEmcConfigurations[j].Revision == Revision);
+ NV_ASSERT(pEmcConfigurations[j].EmcTimingParamNum ==
+ s_Ap20EmcConfig.EmcTimingRegNum);
+
+ s_Ap20EmcConfigSortedTable[i].pOdmEmcConfig =
+ &pEmcConfigurations[j];
+
+ s_Ap20EmcConfigSortedTable[i].Emc2xClockSource = Source;
+ s_Ap20EmcConfigSortedTable[i].Emc2xUndividedIndex =
+ UndividedIndex;
+ s_Ap20EmcConfigSortedTable[i].Emc2xKHz = Emc2xKHz;
+
+ /*
+ * The undivided table entry specifies parameters for
+ * EMC2xKHz = SourceKHz; the EMC divisor field is set to
+ * "0". Next table entries specify parameters for EMC2xKHz
+ * = SourceKHz / (2 * k); the EMC divisor field should be
+ * set as 2 * (2 * k) - 2 = 4 * k - 2.
+ */
+ if (k == 0)
+ s_Ap20EmcConfigSortedTable[i].Emc2xDivisor = 0;
+ else
+ s_Ap20EmcConfigSortedTable[i].Emc2xDivisor =
+ (k << 2) - 2;
+ // Check boot configuration (to be recognized boot Src/Div
+ // settings must be synced with UD path)
+ if ((SourceKHz == PllM0KHz) &&
+ (s_Ap20EmcConfigSortedTable[i].Emc2xDivisor ==
+ s_Ap20EmcConfig.pEmc2xState->Divider))
+ {
+ if (s_Ap20EmcConfig.UdPllM0 == (i == 0))
+ s_Ap20EmcConfig.Index = i;
+ }
+ s_Ap20EmcConfigSortedTable[i].CpuLimitKHz =
+ NvRmFreqMaximum;
+ break;
+ }
+ }
+ if (s_Ap20EmcConfigSortedTable[i].Emc2xKHz != 0)
+ i++; // Entry found - advance sorting index
+ else if (k == 0)
+ break; // Abort sorting if undivided source not found
+
+ Emc2xKHz = SourceKHz / ((++k) << 1);
+ if (Emc2xKHz < pEmcClockLimits->MinKHz)
+ break; // Abort sorting if min frequency reached
+ }
+
+ if (i == 0)
+ break; // Finish sorting if PLLM0 entry not found
+
+ // Next source selection
+ if (Source ==
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLM_OUT0)
+ {
+ Source = // After PLLM0 try PLLP0 as a source
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_PLLP_OUT0;
+ }
+ else
+ break; // Finish sorting
+ }
+}
+
+// Return the module frequency to use when synchronizing the given module
+// with EMC. Currently every supported module (2D, EPP, graphics host)
+// simply uses its SoC maximum; an unsupported module asserts in debug
+// builds and falls back to its maximum as well.
+NvRmFreqKHz
+NvRmPrivAp20GetEmcSyncFreq(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module)
+{
+ NvRmFreqKHz FreqKHz;
+
+ switch (Module)
+ {
+ case NvRmModuleID_2D:
+ case NvRmModuleID_Epp:
+ // TODO: establish scales after Ap20 bring-up
+ FreqKHz = NvRmPrivGetSocClockLimits(Module)->MaxKHz;
+ break;
+
+ case NvRmModuleID_GraphicsHost:
+ // TODO: establish level after Ap20 bring-up
+ FreqKHz = NvRmPrivGetSocClockLimits(Module)->MaxKHz;
+ break;
+
+ default:
+ NV_ASSERT(!"Invalid module for EMC synchronization");
+ FreqKHz = NvRmPrivGetSocClockLimits(Module)->MaxKHz;
+ break;
+ }
+ return FreqKHz;
+}
+
+// Clip the CPU and EMC high-frequency limits against each other.
+// Intentionally a no-op on AP20 for now: the requested limits are passed
+// through unchanged (no CPU/EMC ratio policy has been defined yet).
+void
+NvRmPrivAp20ClipCpuEmcHighLimits(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pCpuHighKHz,
+ NvRmFreqKHz* pEmcHighKHz)
+{
+ // leave requested limits as is - TODO: implement policy?
+}
+
+// Write one set of EMC timing parameters (from the ODM configuration
+// attached to the given table entry) into the EMC timing registers listed
+// in s_Ap20EmcConfig.pEmcTimingReg, pairing parameters and registers by
+// index. Uses direct virtual-address writes (cached base pointer) to keep
+// the update fast; a final read of the last written register flushes the
+// posted writes.
+static void
+Ap20EmcTimingSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmAp20EmcTimingConfig* pEmcConfig)
+{
+ NvU32 i, a, d;
+
+ // EMC module virtual base address to speed up timing update
+ static void* s_pEmcBaseReg = NULL;
+
+ if (s_pEmcBaseReg == NULL)
+ {
+ NvRmModuleTable *tbl = NvRmPrivGetModuleTable(hRmDevice);
+ s_pEmcBaseReg = (tbl->ModInst +
+ tbl->Modules[NvRmPrivModuleID_ExternalMemoryController].Index)->VirtAddr;
+ }
+ a = (NvU32)s_pEmcBaseReg;
+
+ for (i = 0; i < s_Ap20EmcConfig.EmcTimingRegNum; i++)
+ {
+ d = pEmcConfig->pOdmEmcConfig->EmcTimingParameters[i];
+ a = (((NvU32)(s_pEmcBaseReg)) + s_Ap20EmcConfig.pEmcTimingReg[i]);
+ NV_WRITE32(a, d);
+ }
+ d = NV_READ32(a); // make sure writes are completed
+}
+
+// Change the EMC 2x clock divisor "in the background": temporarily disable
+// the EMC clock-change request so the divider write does not trigger an
+// EMC timing switch, program the divisor, then re-enable the request.
+// No-op when the divisor already has the requested value. The undivided
+// PLLM0 (USE_PLLM_UD) bit must be set while this is done (asserted below),
+// since the divided path output is then not in use.
+static void
+Ap20EmcDividerBackgroundSet(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 value)
+{
+ NvU32 cfg2, clk;
+
+ // Check if divider is actually to be changed - exit if not
+ clk = cfg2 = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
+ clk = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ EMC_2X_CLK_DIVISOR, value, clk);
+ if (cfg2 == clk)
+ return;
+
+ // Disable EMC clock change request, so that following EMC clock divider
+ // change will not trigger EMC timing switch ("background change")
+ cfg2 = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0);
+ NV_ASSERT(NV_DRF_VAL(EMC, CFG_2, CLKCHANGE_REQ_ENABLE, cfg2) == 1);
+ cfg2 = NV_FLD_SET_DRF_DEF(EMC, CFG_2, CLKCHANGE_REQ_ENABLE,
+ DISABLED, cfg2);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0, cfg2);
+ cfg2 = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0); // make sure write is completed
+
+ // Set EMC clock divider (UD bit must be set during "background change")
+ NV_ASSERT(NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_EMC, USE_PLLM_UD, clk));
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, clk);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+
+ // Restore EMC clock change request
+ cfg2 = NV_FLD_SET_DRF_DEF(EMC, CFG_2, CLKCHANGE_REQ_ENABLE,
+ ENABLED, cfg2);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0, cfg2);
+ cfg2 = NV_REGR(hRmDevice, NvRmPrivModuleID_ExternalMemoryController, 0,
+ EMC_CFG_2_0); // make sure write is completed
+}
+
+// Switch EMC to the undivided PLLM0 path (table entry 0): update EMC timing
+// shadow registers, set source + USE_PLLM_UD bit (divider untouched, as it
+// is bypassed on the undivided path), then reprogram the divider value in
+// the background, and finally refresh the shadow clock state and PLL
+// reference attachment.
+static void
+Ap20EmcSwitchToUndividedPllM0(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmAp20EmcTimingConfig* pEmcConfig)
+{
+ NvU32 reg;
+ NV_ASSERT(pEmcConfig->Emc2xKHz); // validate table entry
+
+ // Update EMC shadow registers
+ Ap20EmcTimingSet(hRmDevice, pEmcConfig);
+
+ // Set EMC clock source as undivided PLLM0 (divider is "don't care" in this
+ // case, so keep it as is to satisfy restriction: source and divider can not
+ // be changed simultaneously)
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ EMC_2X_CLK_SRC, pEmcConfig->Emc2xClockSource, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ USE_PLLM_UD, 1, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+
+ // Now set new divider value. Note that PLLM_UD bit is already set, so
+ // the actual EMC frequency is not changed. Hence, no need to update EMC
+ // timing - the old settings already match the frequency.
+ Ap20EmcDividerBackgroundSet(hRmDevice, pEmcConfig->Emc2xDivisor);
+
+ // Update EMC state
+ s_Ap20EmcConfig.UdPllM0 = NV_TRUE;
+ s_Ap20EmcConfig.pEmc2xState->SourceClock = pEmcConfig->Emc2xClockSource;
+ s_Ap20EmcConfig.pEmc2xState->Divider = pEmcConfig->Emc2xDivisor;
+ s_Ap20EmcConfig.pEmc2xState->actual_freq = pEmcConfig->Emc2xKHz;
+ NvRmPrivMemoryClockReAttach(
+ hRmDevice, s_Ap20EmcConfig.pEmcInfo, s_Ap20EmcConfig.pEmc2xState);
+}
+
+// Switch EMC from the undivided PLLM0 path to a divided configuration:
+// first set the new divider in the background (frequency unchanged while
+// USE_PLLM_UD is still set), then update EMC timing shadow registers, then
+// in one write select the new source and clear the undivided-path bit.
+// Finishes by refreshing the shadow clock state and PLL references.
+static void
+Ap20EmcSwitchFromUndividedPllM0(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmAp20EmcTimingConfig* pEmcConfig)
+{
+ NvU32 reg;
+ NV_ASSERT(pEmcConfig->Emc2xKHz); // validate table entry
+
+ // 1st set new divider value. Note that PLLM_UD bit is still set, so the
+ // actual EMC frequency is not changed. Hence, no need to update EMC
+ // timing - the old settings already match the frequency.
+ Ap20EmcDividerBackgroundSet(hRmDevice, pEmcConfig->Emc2xDivisor);
+
+ // Update EMC shadow registers
+ Ap20EmcTimingSet(hRmDevice, pEmcConfig);
+
+ // Now set new EMC clock source and disable undivided path (can be done
+ // in one shot - cumulatively it is considered as source change only)
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ EMC_2X_CLK_SRC, pEmcConfig->Emc2xClockSource, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ USE_PLLM_UD, 0, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+
+ // Update EMC state
+ s_Ap20EmcConfig.UdPllM0 = NV_FALSE;
+ s_Ap20EmcConfig.pEmc2xState->SourceClock = pEmcConfig->Emc2xClockSource;
+ s_Ap20EmcConfig.pEmc2xState->Divider = pEmcConfig->Emc2xDivisor;
+ s_Ap20EmcConfig.pEmc2xState->actual_freq = pEmcConfig->Emc2xKHz;
+ NvRmPrivMemoryClockReAttach(
+ hRmDevice, s_Ap20EmcConfig.pEmcInfo, s_Ap20EmcConfig.pEmc2xState);
+}
+
+// Switch EMC between two divided configurations in one register write.
+// Valid only when the current and target configurations share either the
+// same source or the same divider (asserted), so the combined update counts
+// as a single source-or-divider change. Updates EMC timing first, then the
+// CAR source register, then the shadow clock state and PLL references.
+static void
+Ap20EmcSwitchDividedSources(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmAp20EmcTimingConfig* pEmcConfig)
+{
+ NvU32 reg, div, src;
+ NV_ASSERT(pEmcConfig->Emc2xKHz); // validate table entry
+
+ // Update EMC shadow registers
+ Ap20EmcTimingSet(hRmDevice, pEmcConfig);
+
+ // This switch must be called only when original and target configurations
+ // have either common source or common divider - switch in one shot.
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
+ src = NV_DRF_VAL(
+ CLK_RST_CONTROLLER, CLK_SOURCE_EMC, EMC_2X_CLK_SRC, reg);
+ div = NV_DRF_VAL(
+ CLK_RST_CONTROLLER, CLK_SOURCE_EMC, EMC_2X_CLK_DIVISOR, reg);
+ NV_ASSERT((src == pEmcConfig->Emc2xClockSource) ||
+ (div == pEmcConfig->Emc2xDivisor));
+
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ EMC_2X_CLK_DIVISOR, pEmcConfig->Emc2xDivisor, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC,
+ EMC_2X_CLK_SRC, pEmcConfig->Emc2xClockSource, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, reg);
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);
+
+ // Update EMC state (undivided path status not changed)
+ NV_ASSERT(!s_Ap20EmcConfig.UdPllM0);
+ s_Ap20EmcConfig.pEmc2xState->SourceClock = pEmcConfig->Emc2xClockSource;
+ s_Ap20EmcConfig.pEmc2xState->Divider = pEmcConfig->Emc2xDivisor;
+ s_Ap20EmcConfig.pEmc2xState->actual_freq = pEmcConfig->Emc2xKHz;
+ NvRmPrivMemoryClockReAttach(
+ hRmDevice, s_Ap20EmcConfig.pEmcInfo, s_Ap20EmcConfig.pEmc2xState);
+}
+
+// Find the best EMC DFS target for the requested domain frequency.
+// Searches s_Ap20EmcConfigSortedTable for the closest entry at or above the
+// target that also satisfies the CPU frequency limit, records it (index,
+// source, frequency, voltage) in *pDfsSource, and may lower *pCpuTargetKHz
+// to that entry's CPU limit. Returns NV_TRUE if the target can be reached
+// in one switch step, NV_FALSE when an intermediate step via the PLLM0
+// entry is required first. If the table is invalid (no PLLM0 entry), EMC
+// is not scaled and the current frequency is reported instead.
+static NvBool
+Ap20Emc2xClockSourceFind(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz DomainKHz,
+ NvRmFreqKHz* pCpuTargetKHz,
+ NvRmDfsSource* pDfsSource)
+{
+ NvU32 i;
+ NvBool FinalStep = NV_TRUE;
+ NV_ASSERT(DomainKHz <= MaxKHz);
+ NV_ASSERT(s_Ap20EmcConfigSortedTable[0].Emc2xKHz <= MaxKHz);
+
+ // If PLLM0 entry in EMC frequencies table is invalid, EMC frequency
+ // will not be scaled; just fill in current EMC frequency
+ if (s_Ap20EmcConfigSortedTable[0].Emc2xKHz == 0)
+ {
+ pDfsSource->SourceId = NvRmClockSource_Invalid; // invalidate source
+ pDfsSource->DividerSetting = NVRM_AP20_DFS_EMC_FREQ_STEPS;
+ pDfsSource->SourceKHz = s_Ap20EmcConfig.pEmc2xState->actual_freq;
+ pDfsSource->MinMv = NvRmVoltsMaximum; // no v-scaling in this case
+ return FinalStep;
+ }
+
+ // Search sorted pre-defined EMC frequencies for the entry above and closest
+ // to the target that also has CPU limit above the CPU target. Use PLLM0
+ // entry if not found.
+ for (i = NVRM_AP20_DFS_EMC_FREQ_STEPS; i > 0;)
+ {
+ i--;
+ if ((DomainKHz <= s_Ap20EmcConfigSortedTable[i].Emc2xKHz) &&
+ (*pCpuTargetKHz <= s_Ap20EmcConfigSortedTable[i].CpuLimitKHz))
+ break;
+ }
+
+ // Target can be reached in one step, provided:
+ // - either current or target entry is PLLM0 OR
+ // - current and target entries have same source OR
+ // - current and target entries have same divider
+ if ((i != 0) && (s_Ap20EmcConfig.Index != 0) &&
+ (s_Ap20EmcConfigSortedTable[i].Emc2xDivisor !=
+ s_Ap20EmcConfigSortedTable[s_Ap20EmcConfig.Index].Emc2xDivisor) &&
+ (s_Ap20EmcConfigSortedTable[i].Emc2xClockSource !=
+ s_Ap20EmcConfigSortedTable[s_Ap20EmcConfig.Index].Emc2xClockSource))
+ {
+ i = 0; // one-step check failed - use PLLM0 as intermediate target
+ FinalStep = NV_FALSE;
+ }
+
+ // Record found EMC target, and limit CPU target if necessary
+ pDfsSource->DividerSetting = i;
+ pDfsSource->SourceId = s_Ap20EmcConfig.pEmcInfo->Sources[
+ s_Ap20EmcConfigSortedTable[i].Emc2xClockSource];
+ pDfsSource->SourceKHz = s_Ap20EmcConfigSortedTable[i].Emc2xKHz;
+ pDfsSource->MinMv =
+ s_Ap20EmcConfigSortedTable[i].pOdmEmcConfig->EmcCoreVoltageMv;
+ if (*pCpuTargetKHz > s_Ap20EmcConfigSortedTable[i].CpuLimitKHz)
+ *pCpuTargetKHz = s_Ap20EmcConfigSortedTable[i].CpuLimitKHz;
+ return FinalStep;
+}
+
+// Apply the EMC DFS target selected by Ap20Emc2xClockSourceFind().
+// The DividerSetting field is interpreted as an index into the sorted EMC
+// configuration table. Chooses the switch routine by whether the undivided
+// PLLM0 entry (index 0) is the source or destination of the transition,
+// and reports the resulting domain frequency through *pDomainKHz.
+static void
+Ap20Emc2xClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pDomainKHz,
+ const NvRmDfsSource* pDfsSource)
+{
+ NvU32 Index;
+
+ // Always return the requested source frequency
+ *pDomainKHz = pDfsSource->SourceKHz;
+ NV_ASSERT(*pDomainKHz);
+
+ // If no valid source is found, EMC frequency is not scaled.
+ if (pDfsSource->SourceId == NvRmClockSource_Invalid)
+ return;
+
+ // Divider settings in EMC source descriptor is an index into the table of
+ // pre-defined EMC configurations in descending frequency order.
+ Index = pDfsSource->DividerSetting;
+ if (Index == s_Ap20EmcConfig.Index)
+ return; // do nothing if new index is the same as current
+
+ // Switch EMC to the new target
+ if (Index == 0)
+ {
+ Ap20EmcSwitchToUndividedPllM0(
+ hRmDevice, &s_Ap20EmcConfigSortedTable[Index]);
+ }
+ else if (s_Ap20EmcConfig.Index == 0)
+ {
+ Ap20EmcSwitchFromUndividedPllM0(
+ hRmDevice, &s_Ap20EmcConfigSortedTable[Index]);
+ }
+ else
+ {
+ Ap20EmcSwitchDividedSources(
+ hRmDevice, &s_Ap20EmcConfigSortedTable[Index]);
+ }
+ s_Ap20EmcConfig.Index = Index;
+}
+
+/*****************************************************************************/
+
+// Read the VDE clock source/divider selection from its clock-and-reset
+// register and refresh the shadow VDE clock state, including the actual
+// frequency derived via the fractional divider formula.
+static void
+Ap20VdeClockStateUpdate(
+ NvRmDeviceHandle hRmDevice)
+{
+ NvU32 reg;
+ NvRmFreqKHz SourceClockFreq;
+ NvRmModuleClockInfo* pCinfo = s_Ap20VdeConfig.pVdeInfo;
+ NvRmModuleClockState* pCstate = s_Ap20VdeConfig.pVdeState;
+
+ NV_ASSERT(pCinfo && pCstate);
+
+ // Determine VDE source and divider setting; update VDE clock state
+ reg = NV_REGR(hRmDevice,
+ NvRmPrivModuleID_ClockAndReset, 0, pCinfo->ClkSourceOffset);
+ pCstate->Divider =
+ ((reg >> pCinfo->DivisorFieldShift) & pCinfo->DivisorFieldMask);
+ pCstate->SourceClock =
+ ((reg >> pCinfo->SourceFieldShift) & pCinfo->SourceFieldMask);
+ SourceClockFreq =
+ NvRmPrivGetClockSourceFreq(pCinfo->Sources[pCstate->SourceClock]);
+
+ // Fractional divider output = (Source Frequency * 2) / (divider + 2)
+ pCstate->actual_freq = ((SourceClockFreq << 1) / (pCstate->Divider + 2));
+}
+
+// Initialize the VDE configuration shadow: bind the VDE clock descriptor
+// and state pointers, then read the current clock settings from h/w.
+static void Ap20VdeConfigInit(NvRmDeviceHandle hRmDevice)
+{
+ // Init VDE configuration shadow structure
+ NV_ASSERT_SUCCESS(NvRmPrivGetClockState(
+ hRmDevice, NvRmModuleID_Vde,
+ &s_Ap20VdeConfig.pVdeInfo, &s_Ap20VdeConfig.pVdeState));
+
+ // Get VDE clock state from h/w
+ Ap20VdeClockStateUpdate(hRmDevice);
+}
+
+// Find the best source/divider for the requested VDE domain frequency.
+// Candidate sources are tried in order: oscillator (ClkM), PLLP0, then the
+// better approximation of PLLC0/PLLM0 through the VDE divider. If the VDE
+// clock is disabled (refCount == 0) the configuration cannot be changed
+// and no voltage requirement is reported. On success fills *pDfsSource
+// with source ID, divider setting, reachable frequency, and the minimum
+// operating voltage obtained from the module v-scale re-attachment.
+static void
+Ap20VdeClockSourceFind(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz DomainKHz,
+ NvRmDfsSource* pDfsSource)
+{
+ NvU32 c, m;
+ NvRmFreqKHz SourceKHz, ReachedKHzP, ReachedKHzC, ReachedKHzM;
+ NV_ASSERT(DomainKHz <= MaxKHz);
+
+ // VDE clock is disabled - can not change configuration at all,
+ // and does not have any voltage requirements
+ if (s_Ap20VdeConfig.pVdeState->refCount == 0)
+ {
+ pDfsSource->SourceId = NvRmClockSource_Invalid;
+ pDfsSource->MinMv = NvRmVoltsUnspecified;
+ return;
+ }
+
+ // 1st try oscillator through VDE divider
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+ if (DomainKHz <= SourceKHz)
+ {
+ pDfsSource->SourceId = NvRmClockSource_ClkM;
+ pDfsSource->DividerSetting = NvRmPrivFindFreqMinAbove(
+ s_Ap20VdeConfig.pVdeInfo->Divider, SourceKHz, MaxKHz, &DomainKHz);
+ goto get_mv;
+ }
+
+ // 2nd option - PLLP0 through VDE divider
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0);
+ if (DomainKHz <= SourceKHz)
+ {
+ pDfsSource->SourceId = NvRmClockSource_PllP0;
+ pDfsSource->DividerSetting = NvRmPrivFindFreqMinAbove(
+ s_Ap20VdeConfig.pVdeInfo->Divider, SourceKHz, MaxKHz, &DomainKHz);
+ goto get_mv;
+ }
+
+ // PLLP0 does not "cover" the target - nevertheless, check it against
+ // PLLC0 and PLLM0 - it may still provide the best approximation
+ ReachedKHzP = SourceKHz;
+ ReachedKHzC = ReachedKHzM = DomainKHz;
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+ c = NvRmPrivFindFreqMinAbove(
+ s_Ap20VdeConfig.pVdeInfo->Divider, SourceKHz, MaxKHz, &ReachedKHzC);
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+ m = NvRmPrivFindFreqMinAbove(
+ s_Ap20VdeConfig.pVdeInfo->Divider, SourceKHz, MaxKHz, &ReachedKHzM);
+
+ if ((ReachedKHzC <= ReachedKHzP) && (ReachedKHzM <= ReachedKHzP))
+ {
+ pDfsSource->SourceId = NvRmClockSource_PllP0;
+ pDfsSource->DividerSetting = 0;
+ SourceKHz = DomainKHz = ReachedKHzP;
+ goto get_mv;
+ }
+
+ /*
+ * 3rd option - PLLC0 through VDE divider or 4th option - PLLM0 through
+ * VDE divider. Option selection is based on the following rule: select
+ * the divider with smaller frequency if it is equal or above the target
+ * frequency, otherwise select the divider with bigger output frequency.
+ */
+ if (ReachedKHzM > ReachedKHzC)
+ {
+ if (ReachedKHzC >= DomainKHz)
+ {
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+ pDfsSource->SourceId = NvRmClockSource_PllC0;
+ pDfsSource->DividerSetting = c;
+ DomainKHz = ReachedKHzC; // use PLLC0 as source
+ goto get_mv;
+ }
+ }
+ else // ReachedKHzM <= ReachedKHzC
+ {
+ if (ReachedKHzM < DomainKHz)
+ {
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+ pDfsSource->SourceId = NvRmClockSource_PllC0;
+ pDfsSource->DividerSetting = c;
+ DomainKHz = ReachedKHzC; // use PLLC0 as source
+ goto get_mv;
+ }
+ }
+ // Fall through: PLLM0 is the selected option
+ SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+ pDfsSource->SourceId = NvRmClockSource_PllM0;
+ pDfsSource->DividerSetting = m;
+ DomainKHz = ReachedKHzM; // use PLLM0 as source
+
+get_mv:
+ // Finally update VDE v-scale references, get operational voltage for the
+ // found source/divider settings, and store new domain frequency
+ pDfsSource->MinMv = NvRmPrivModuleVscaleReAttach(
+ hRmDevice, s_Ap20VdeConfig.pVdeInfo, s_Ap20VdeConfig.pVdeState,
+ DomainKHz, SourceKHz);
+ pDfsSource->SourceKHz = DomainKHz;
+}
+
+// Apply the VDE source/divider found by Ap20VdeClockSourceFind().
+// Maps the source ID back to the VDE source-selector index, skips the h/w
+// update when nothing changes (or when the source is invalid because the
+// VDE clock is disabled), otherwise programs the new clock settings,
+// refreshes PLL references, and reports the resulting frequency through
+// *pDomainKHz.
+static void
+Ap20VdeClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pDomainKHz,
+ const NvRmDfsSource* pDfsSource)
+{
+// Shortcut - number of AP20 VDE sources (instead of checking descriptor)
+#define AP20_VDE_SOURCES_NUMBER (4)
+
+ NvU32 SourceIndex;
+ NvRmModuleClockInfo* pCinfo = s_Ap20VdeConfig.pVdeInfo;
+ NvRmModuleClockState* pCstate = s_Ap20VdeConfig.pVdeState;
+
+ // Configuration can not be changed (VDE clock disabled) - exit
+ if (pDfsSource->SourceId == NvRmClockSource_Invalid)
+ {
+ *pDomainKHz = pCstate->actual_freq;
+ return;
+ }
+
+ // Convert Source ID into VDE source selector index
+ for (SourceIndex = 0; SourceIndex < AP20_VDE_SOURCES_NUMBER; SourceIndex++)
+ {
+ NvRmClockSource SourceId = pCinfo->Sources[SourceIndex];
+ if (SourceId == pDfsSource->SourceId)
+ break;
+ }
+ NV_ASSERT(SourceIndex < AP20_VDE_SOURCES_NUMBER);
+
+ // No changes in VDE clock configuration - exit
+ if ((pCstate->SourceClock == SourceIndex) &&
+ (pCstate->Divider == pDfsSource->DividerSetting))
+ {
+ *pDomainKHz = pCstate->actual_freq;
+ return;
+ }
+
+ // Set new VDE clock state and update PLL references
+ pCstate->SourceClock = SourceIndex;
+ pCstate->Divider = pDfsSource->DividerSetting;
+ pCstate->actual_freq = pDfsSource->SourceKHz;
+ NvRmPrivModuleClockSet(hRmDevice, pCinfo, pCstate);
+ NvRmPrivModuleClockReAttach(hRmDevice, pCinfo, pCstate);
+
+ *pDomainKHz = pCstate->actual_freq;
+}
+
+/*****************************************************************************/
+
+// Selects the best clock source for the System bus domain to reach DomainKHz
+// without exceeding MaxKHz. Preference order: oscillator, clock doubler,
+// PLLP policy divider, then PLLM1/PLLC1 secondary dividers. On return
+// pDfsSource holds the source ID, divider setting, source frequency, and the
+// minimum core voltage (including any divider source voltage requirement).
+static void
+Ap20SystemClockSourceFind(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz DomainKHz,
+    NvRmDfsSource* pDfsSource)
+{
+    NvU32 i;
+    NvRmMilliVolts DivMv;
+    NvRmFreqKHz SourceKHz, M1KHz, C1KHz;
+    NV_ASSERT(DomainKHz <= MaxKHz);
+    DivMv = pDfsSource->DividerSetting = 0; // no 2ndary divider by default
+
+    // 1st try oscillator
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkM;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    // 2nd choice - doubler
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkD);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkD;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    /*
+     * 3rd option - PLLP divider per policy specification. Find
+     * the policy entry with source frequency closest and above requested.
+     * If requested frequency exceeds all policy options within domain
+     * maximum limit, select the entry with the highest possible frequency.
+     */
+    for (i = 0; i < s_Ap20PllPSystemClockPolicyEntries; i++)
+    {
+        SourceKHz = s_Ap20PllPSystemClockPolicy[i].SourceKHz;
+        if (SourceKHz > MaxKHz)
+        {
+            // Back off to the last entry that fits under the domain maximum
+            NV_ASSERT(i);
+            i--;
+            break;
+        }
+        if (DomainKHz <= SourceKHz)
+        {
+            break;
+        }
+    }
+    if (i == s_Ap20PllPSystemClockPolicyEntries)
+    {
+        i--;    // last/highest source is the best we can do
+    }
+    SourceKHz = s_Ap20PllPSystemClockPolicy[i].SourceKHz;
+
+    /*
+     * 4th and 5th options - PLLM1 and PLLC1 secondary dividers are configured
+     * at maximum possible frequency during initialization or whenever base PLL
+     * settings are changed. Use these options only if PLLP can not provide
+     * high enough source frequency for the requested target. Always select the
+     * source (PLLM1 or PLLC1) with bigger frequency.
+     */
+    if (SourceKHz < DomainKHz)
+    {
+        M1KHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM1);
+        C1KHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC1);
+        if ((M1KHz > SourceKHz) || (C1KHz > SourceKHz))
+        {
+            if (M1KHz > C1KHz)
+            {
+                pDfsSource->SourceKHz = M1KHz; // Selected PLLM 2ndary divider
+                pDfsSource->SourceId = NvRmClockSource_PllM1;
+                DivMv = NvRmPrivSourceVscaleGetMV(hRmDevice,
+                    NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0));
+            }
+            else
+            {
+                pDfsSource->SourceKHz = C1KHz; // Selected PLLC 2ndary divider
+                pDfsSource->SourceId = NvRmClockSource_PllC1;
+                DivMv = NvRmPrivSourceVscaleGetMV(hRmDevice,
+                    NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0));
+            }
+            goto get_mv;
+        }
+    }
+    pDfsSource->SourceKHz = SourceKHz; // Selected PLLP 2ndary divider
+    pDfsSource->SourceId = s_Ap20PllPSystemClockPolicy[i].SourceId;
+    pDfsSource->DividerSetting = s_Ap20PllPSystemClockPolicy[i].DividerSetting;
+    DivMv = NvRmPrivSourceVscaleGetMV(hRmDevice,
+        NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0));
+
+get_mv:
+    // Finally get operational voltage for found source; the divider's own
+    // voltage requirement (DivMv) acts as a floor on the result.
+    pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
+        hRmDevice, NvRmPrivModuleID_System, pDfsSource->SourceKHz);
+    if (pDfsSource->MinMv < DivMv)
+        pDfsSource->MinMv = DivMv;
+}
+
+// Programs the System bus core clock to use the source selected by
+// Ap20SystemClockSourceFind(). Only PLLP2 needs its variable divider
+// reprogrammed first; the other policy sources are fixed-rate.
+static void
+Ap20SystemBusClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz* pDomainKHz,
+    const NvRmDfsSource* pDfsSource)
+{
+    NvRmClockSource SourceId = pDfsSource->SourceId;
+    const NvRmCoreClockInfo* pCinfo =
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+
+    switch(SourceId)
+    {
+        case NvRmClockSource_PllP2:
+            // Reconfigure PLLP variable divider if it is used as a source
+            NvRmPrivDividerSet(hRmDevice,
+               NvRmPrivGetClockSourceHandle(SourceId)->pInfo.pDivider,
+               pDfsSource->DividerSetting);
+            // fall through
+        case NvRmClockSource_PllC1:
+        case NvRmClockSource_PllM1:
+        case NvRmClockSource_ClkD:
+        case NvRmClockSource_ClkM:
+            break;  // fixed sources - do nothing
+        default:
+            NV_ASSERT(!"Invalid source (per policy)");
+    }
+    NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+        hRmDevice, pCinfo, MaxKHz, pDomainKHz, &SourceId));
+}
+
+// Re-programs the PMC CPU power-good timer whenever the APB clock changes,
+// since the h/w counts the delay in APB clock ticks.
+static void
+Ap20SetCpuPowerGoodDelay(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz ApbKHz)
+{
+    NvU32 reg;
+    NV_ASSERT(s_Ap20CpuConfig.CpuPowerGoodUs);
+
+    // AP20 CPU power good delay is counted by h/w in APB clocks (use
+    // 1/1000 ~ 5/4096 with 20% margin): ticks = ApbKHz * 5/4096 * delay_us
+    reg = ((ApbKHz * 5) >> 12) * s_Ap20CpuConfig.CpuPowerGoodUs;
+    reg = NV_DRF_NUM(APBDEV_PMC, CPUPWRGOOD_TIMER, DATA, reg);
+    NV_REGW(hRmDevice, NvRmModuleID_Pmif, 0,
+            APBDEV_PMC_CPUPWRGOOD_TIMER_0, reg);
+}
+
+/*****************************************************************************/
+
+// Fixed point calculation bits (scale factor for CoreOverCpuSlope math)
+#define FIXED_POINT_BITS (10)
+
+// One-time init of CPU DFS configuration: caches the characterization-based
+// PLLX frequency step table and derives power-good delay plus the core-over-
+// CPU voltage dependency (offset/slope) from the ODM PMU property, widened
+// by the PMU accuracy margin.
+static void Ap20CpuConfigInit(NvRmDeviceHandle hRmDevice)
+{
+    NvOdmPmuProperty PmuProperty;
+
+    // Init PLLX frequency steps table based on chacterization data, so that
+    // each entry corresponds to the v-scale level
+    s_Ap20CpuConfig.pPllXStepsKHz = NvRmPrivModuleVscaleGetMaxKHzList(
+        hRmDevice, NvRmModuleID_Cpu, &s_Ap20CpuConfig.PllXStepsNo);
+    NV_ASSERT(s_Ap20CpuConfig.pPllXStepsKHz && s_Ap20CpuConfig.PllXStepsNo);
+    NV_ASSERT(s_Ap20CpuConfig.pPllXStepsKHz[0] >= NVRM_PLLP_FIXED_FREQ_KHZ);
+
+    // Init CPU power good delay and Core over CPU voltage dependency
+    // parameters based on PMU property. Fall back to RM defaults when the
+    // ODM query does not provide a property.
+    if (!NvOdmQueryGetPmuProperty(&PmuProperty))
+    {
+        PmuProperty.CpuPowerGoodUs = NVRM_DEFAULT_CPU_PWRGOOD_US;
+        PmuProperty.AccuracyPercent = NVRM_DEFAULT_PMU_ACCURACY_PCT;
+    }
+    NV_ASSERT(PmuProperty.CpuPowerGoodUs && PmuProperty.AccuracyPercent);
+    NV_ASSERT(PmuProperty.AccuracyPercent < 5); // 5% is a must for PMU
+
+    s_Ap20CpuConfig.CpuPowerGoodUs = PmuProperty.CpuPowerGoodUs;
+    // Worst-case offset/slope: inflate by PMU inaccuracy in both directions
+    s_Ap20CpuConfig.CoreOverCpuOffset = (NV_AP20_CORE_OVER_CPU_MV * 100) /
+        (100 - PmuProperty.AccuracyPercent);
+    s_Ap20CpuConfig.CoreOverCpuSlope =
+        ((0x1 << FIXED_POINT_BITS) * (100 + PmuProperty.AccuracyPercent)) /
+        (100 - PmuProperty.AccuracyPercent);
+}
+
+// Selects the best clock source for the CPU domain to reach DomainKHz within
+// MaxKHz. Preference order: oscillator, PLLP policy divider, PLLX steps.
+// Outputs the selection in pDfsSource and, via pSystemMv, the system core
+// voltage implied by the CPU voltage (slope/offset dependency set up in
+// Ap20CpuConfigInit) and any divider source voltage requirement.
+static void
+Ap20CpuClockSourceFind(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz DomainKHz,
+    NvRmDfsSource* pDfsSource,
+    NvRmMilliVolts* pSystemMv)
+{
+    NvU32 i;
+    NvRmMilliVolts DivMv;
+    NvRmFreqKHz SourceKHz;
+
+    NV_ASSERT(DomainKHz <= MaxKHz);
+    NV_ASSERT(s_Ap20CpuConfig.pPllXStepsKHz);
+    DivMv = pDfsSource->DividerSetting = 0; // no 2ndary divider by default
+
+    // 1st try oscillator
+    SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        pDfsSource->SourceId = NvRmClockSource_ClkM;
+        pDfsSource->SourceKHz = SourceKHz;
+        goto get_mv;
+    }
+
+    // 2nd choice - PLLP divider per policy specification
+    SourceKHz =
+        s_Ap20PllPCpuClockPolicy[s_Ap20PllPCpuClockPolicyEntries-1].SourceKHz;
+    NV_ASSERT(SourceKHz <= MaxKHz);
+    if (DomainKHz <= SourceKHz)
+    {
+        // The requested frequency is within PLLP divider policy table, and all
+        // policy entries are within domain maximum limit. Then, find the entry
+        // with source frequency closest and above the requested.
+        // (The break is guaranteed by the check above, so i stays in range.)
+        for (i = 0; i < s_Ap20PllPCpuClockPolicyEntries; i++)
+        {
+            SourceKHz = s_Ap20PllPCpuClockPolicy[i].SourceKHz;
+            if (DomainKHz <= SourceKHz)
+                break;
+        }
+        if (s_Ap20PllPCpuClockPolicy[i].DividerSetting == 0)
+            pDfsSource->SourceId = NvRmClockSource_PllP0; // Bypass 1:1 divider
+        else
+        {
+            pDfsSource->SourceId = s_Ap20PllPCpuClockPolicy[i].SourceId;
+            DivMv = NvRmPrivSourceVscaleGetMV(hRmDevice,
+                NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0));
+        }
+        pDfsSource->SourceKHz = s_Ap20PllPCpuClockPolicy[i].SourceKHz;
+        pDfsSource->DividerSetting = s_Ap20PllPCpuClockPolicy[i].DividerSetting;
+        goto get_mv;
+    }
+
+    /*
+     * 3rd and final choice - PLLX base output. Clip PllX policy entries to
+     * domain maximum limit, and find the entry with source frequency closest
+     * and above the requested. If not found, use the last entry with the
+     * highest frequency.
+     */
+    for (i = 0; i < s_Ap20CpuConfig.PllXStepsNo; i++)
+    {
+        SourceKHz = NV_MIN(s_Ap20CpuConfig.pPllXStepsKHz[i], MaxKHz);
+        if (DomainKHz <= SourceKHz)
+            break;
+    }
+    pDfsSource->SourceId = NvRmClockSource_PllX0;
+    pDfsSource->SourceKHz = SourceKHz;
+
+get_mv:
+    // Finally get operational voltage for found source, then derive the
+    // system core voltage from the CPU voltage via fixed-point slope/offset
+    pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
+        hRmDevice, NvRmModuleID_Cpu, pDfsSource->SourceKHz);
+    *pSystemMv = ((pDfsSource->MinMv * s_Ap20CpuConfig.CoreOverCpuSlope) >>
+                  FIXED_POINT_BITS) + s_Ap20CpuConfig.CoreOverCpuOffset;
+    *pSystemMv = NV_MAX(DivMv, (*pSystemMv));
+}
+
+// Programs the CPU bus core clock to use the source selected by
+// Ap20CpuClockSourceFind(). PLLX is re-locked to the target frequency and
+// PLLP4's variable divider reprogrammed when they are the chosen source;
+// the remaining policy sources are fixed-rate.
+static void
+Ap20CpuBusClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz MaxKHz,
+    NvRmFreqKHz* pDomainKHz,
+    const NvRmDfsSource* pDfsSource)
+{
+    NvRmFreqKHz SourceKHz = pDfsSource->SourceKHz;
+    NvRmClockSource SourceId = pDfsSource->SourceId;
+    const NvRmCoreClockInfo* pCinfo =
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+
+    switch(SourceId)
+    {
+        case NvRmClockSource_PllX0:
+            // Reconfigure PLLX if it is used as a source
+            NvRmPrivReConfigurePllX(hRmDevice, SourceKHz);
+            break;
+        case NvRmClockSource_PllP4:
+            // Reconfigure PLLP variable divider if it is used as a source
+            NvRmPrivDividerSet(hRmDevice,
+               NvRmPrivGetClockSourceHandle(SourceId)->pInfo.pDivider,
+               pDfsSource->DividerSetting);
+            // fall through
+        case NvRmClockSource_PllP0:
+        case NvRmClockSource_ClkM:
+            break;  // fixed sources - do nothing
+        default:
+            NV_ASSERT(!"Invalid source (per policy)");
+    }
+    NV_ASSERT_SUCCESS(NvRmPrivCoreClockConfigure(
+        hRmDevice, pCinfo, MaxKHz, pDomainKHz, &SourceId));
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+// One-time init of all scaled-clock (DFS) configuration records for AP20:
+// EMC timing tables, VDE descriptor/state, and CPU voltage/frequency steps.
+void
+NvRmPrivAp20ScaledClockConfigInit(NvRmDeviceHandle hRmDevice)
+{
+    Ap20EmcConfigInit(hRmDevice);
+    Ap20VdeConfigInit(hRmDevice);
+    Ap20CpuConfigInit(hRmDevice);
+}
+
+// Top-level DFS reconfiguration for AP20. Clamps the requested domain
+// frequencies to policy constraints, finds sources for CPU/System/VDE/EMC,
+// raises voltages before the clock change and trims them after, then
+// programs all domains. pDfsKHz is updated in place with the frequencies
+// actually achieved. Returns the status of the EMC source search.
+NvBool NvRmPrivAp20DfsClockConfigure(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmDfsFrequencies* pMaxKHz,
+    NvRmDfsFrequencies* pDfsKHz)
+{
+    NvBool Status;
+    NvRmFreqKHz FreqKHz;
+    NvRmMilliVolts SystemMv;
+    NvRmDfsSource CpuClockSource, Emc2xClockSource;
+    NvRmDfsSource SystemClockSource, VdeClockSource;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pMaxKHz && pDfsKHz);
+
+    /*
+     * Adjust System bus core clock. It should be sufficient to supply AVP,
+     * and all bus clocks. Also make sure that AHB bus frequency is above
+     * the one requested for APB clock.
+     */
+    pDfsKHz->Domains[NvRmDfsClockId_Ahb] = NV_MAX(
+        pDfsKHz->Domains[NvRmDfsClockId_Ahb],
+        pDfsKHz->Domains[NvRmDfsClockId_Apb]);
+    FreqKHz = pDfsKHz->Domains[NvRmDfsClockId_System];
+    FreqKHz = NV_MAX(FreqKHz, pDfsKHz->Domains[NvRmDfsClockId_Ahb]);
+    FreqKHz = NV_MAX(FreqKHz, pDfsKHz->Domains[NvRmDfsClockId_Avp]);
+    pDfsKHz->Domains[NvRmDfsClockId_System] = FreqKHz;
+
+#if LIMIT_SYS_TO_AHB_APB_RATIOS
+    // Keep APB >= Sys/4 and AHB >= Sys/2 when ratio limiting is enabled
+    if (pDfsKHz->Domains[NvRmDfsClockId_Apb] < (FreqKHz >> 2))
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Apb] = (FreqKHz >> 2);
+    }
+    if (pDfsKHz->Domains[NvRmDfsClockId_Ahb] < (FreqKHz >> 1))
+    {
+        pDfsKHz->Domains[NvRmDfsClockId_Ahb] = (FreqKHz >> 1);
+    }
+#endif
+
+    // Find clock sources for CPU, System, VDE and Memory clocks.
+    Ap20VdeClockSourceFind(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_Vpipe],
+        pDfsKHz->Domains[NvRmDfsClockId_Vpipe],
+        &VdeClockSource);
+    Ap20SystemClockSourceFind(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_System],
+        pDfsKHz->Domains[NvRmDfsClockId_System],
+        &SystemClockSource);
+    Status = Ap20Emc2xClockSourceFind(hRmDevice,
+        (pMaxKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        (pDfsKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        &pDfsKHz->Domains[NvRmDfsClockId_Cpu],  // Need for CPU/EMC ratio policy
+        &Emc2xClockSource);
+    Ap20CpuClockSourceFind(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_Cpu],
+        pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+        &CpuClockSource, &SystemMv);
+    // CPU clock source may affect system core voltage as well
+    SystemMv = NV_MAX(SystemMv, SystemClockSource.MinMv);
+
+#if !NV_OAL
+    // Adjust core and cpu voltage for the new clock sources before actual
+    // change. Note that only voltage requirements for always running clocks
+    // (CPU, System, EMC) are specified explicitely. VDE voltage requirement
+    // is already integrated with other clock-gated modules.
+    NvRmPrivVoltageScale(NV_TRUE, CpuClockSource.MinMv,
+                         SystemMv, Emc2xClockSource.MinMv);
+#endif
+
+    // Configure VDE, System bus and derived clocks (do not care about MIO on
+    // AP20). Note that APB is the only clock in system complex that may have
+    // different (lower) maximum limit - pass it explicitly to set function.
+    Ap20SystemBusClockConfigure(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_System],
+        &pDfsKHz->Domains[NvRmDfsClockId_System],
+        &SystemClockSource);
+    pDfsKHz->Domains[NvRmDfsClockId_Avp] = FreqKHz =    // no AVP clock skipping
+        pDfsKHz->Domains[NvRmDfsClockId_System];
+    NvRmPrivBusClockFreqSet(hRmDevice,
+        pDfsKHz->Domains[NvRmDfsClockId_System],
+        &FreqKHz,                                       // VDE decoupled
+        &pDfsKHz->Domains[NvRmDfsClockId_Ahb],
+        &pDfsKHz->Domains[NvRmDfsClockId_Apb],
+        pMaxKHz->Domains[NvRmDfsClockId_Apb]);
+    // Power-good timer counts in APB clocks - must track the new APB rate
+    Ap20SetCpuPowerGoodDelay(hRmDevice, pDfsKHz->Domains[NvRmDfsClockId_Apb]);
+    Ap20VdeClockConfigure(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_Vpipe],
+        &pDfsKHz->Domains[NvRmDfsClockId_Vpipe],
+        &VdeClockSource);
+
+    // Configure Memory clocks and convert frequency to DFS EMC 1x domain
+    FreqKHz = pDfsKHz->Domains[NvRmDfsClockId_Emc] << 1;
+    Ap20Emc2xClockConfigure(hRmDevice,
+        (pMaxKHz->Domains[NvRmDfsClockId_Emc] << 1),
+        &FreqKHz, &Emc2xClockSource);
+    pDfsKHz->Domains[NvRmDfsClockId_Emc] = FreqKHz >> 1;
+
+    // Configure CPU core clock
+    Ap20CpuBusClockConfigure(hRmDevice,
+        pMaxKHz->Domains[NvRmDfsClockId_Cpu],
+        &pDfsKHz->Domains[NvRmDfsClockId_Cpu],
+        &CpuClockSource);
+
+#if !NV_OAL
+    // Adjust core and cpu voltage after actual clock change.
+    NvRmPrivVoltageScale(NV_FALSE, CpuClockSource.MinMv,
+                         SystemMv, Emc2xClockSource.MinMv);
+#endif
+    return Status;
+}
+
+// Reads back the current frequencies of all DFS domains from h/w clock state
+// into pDfsKHz (System/AVP share one core clock; VDE and EMC states are
+// refreshed from h/w first; EMC is reported in the 1x domain).
+void
+NvRmPrivAp20DfsClockFreqGet(
+    NvRmDeviceHandle hRmDevice,
+    NvRmDfsFrequencies* pDfsKHz)
+{
+    NvRmFreqKHz SystemFreq;
+    const NvRmCoreClockInfo* pCinfo;
+    NV_ASSERT(hRmDevice && pDfsKHz);
+
+    // Get frequencies of the System core clock, AVP clock (the same as System
+    // - no clock skipping), AHB, APB, and V-pipe bus clock. Note that on AP20
+    // V-pipe is decoupled from the System bus, and has its own controls.
+    pCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore;
+    SystemFreq = NvRmPrivCoreClockFreqGet(hRmDevice, pCinfo);
+    pDfsKHz->Domains[NvRmDfsClockId_System] = SystemFreq;
+    pDfsKHz->Domains[NvRmDfsClockId_Avp] = SystemFreq;
+
+    NvRmPrivBusClockFreqGet(
+        hRmDevice, SystemFreq,
+        &pDfsKHz->Domains[NvRmDfsClockId_Vpipe],
+        &pDfsKHz->Domains[NvRmDfsClockId_Ahb],
+        &pDfsKHz->Domains[NvRmDfsClockId_Apb]);
+    // Overwrite the bus-derived V-pipe value with the real VDE module clock
+    Ap20VdeClockStateUpdate(hRmDevice);
+    pDfsKHz->Domains[NvRmDfsClockId_Vpipe] =
+        s_Ap20VdeConfig.pVdeState->actual_freq;
+
+    // Get CPU core clock frequencies
+    pCinfo = NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore;
+    pDfsKHz->Domains[NvRmDfsClockId_Cpu] =
+        NvRmPrivCoreClockFreqGet(hRmDevice, pCinfo);
+
+    // Get EMC clock frequency (DFS monitors EMC 1x domain)
+    Ap20Emc2xClockStateUpdate(hRmDevice);   // Get EMC2x clock state from h/w
+    pDfsKHz->Domains[NvRmDfsClockId_Emc] =
+        (s_Ap20EmcConfig.pEmc2xState->actual_freq >> 1);
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+// Boot-time "fast clocks" setup (SoC only, non-OAL builds): switches EMC to
+// undivided PLLM0 when possible, raises the System bus to its SoC maximum via
+// the best of PLLP2/PLLM1/PLLC1, configures VDE and PLLX/CPU to maximum
+// (nominal core voltage is assumed), and fixes the PLLP4 output for external
+// devices.
+void
+NvRmPrivAp20FastClockConfig(NvRmDeviceHandle hRmDevice)
+{
+#if !NV_OAL
+    NvU32 divc1, divm1, divp2;
+    NvRmFreqKHz SclkKHz, CpuKHz, PllP2KHz, PllM1KHz, PllC1KHz;
+    NvRmDfsSource VdeSource;
+
+    NvRmFreqKHz FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+    if (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc)
+        return; // fast clocks on SoC only
+
+    // Set fastest EMC/MC configuration provided PLLM0 boot frequency matches
+    // one of the pre-defined configurations, i.e, it is the first entry in the
+    // sorted table
+    if ((s_Ap20EmcConfigSortedTable[0].Emc2xKHz == FreqKHz) &&
+        (s_Ap20EmcConfig.Index != 0))
+    {
+        Ap20EmcSwitchToUndividedPllM0(hRmDevice, s_Ap20EmcConfigSortedTable);
+        s_Ap20EmcConfig.Index = 0;
+    }
+
+    // Set AVP/System Bus clock (now, with nominal core voltage it can be up
+    // to SoC maximum). First determine settings for PLLP/PLLM/PLLC secondary
+    // dividers to get maximum possible frequency on PLLP_OUT2, or PLLM_OUT1
+    // or PLLC_OUT1 outputs.
+    SclkKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Avp)->MaxKHz;
+    NV_ASSERT(SclkKHz);
+
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP0);
+    PllP2KHz = SclkKHz;
+    divp2 = NvRmPrivFindFreqMaxBelow(
+        NvRmClockDivider_Fractional_2, FreqKHz, PllP2KHz, &PllP2KHz);
+
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0);
+    PllM1KHz = SclkKHz;
+    divm1 = NvRmPrivFindFreqMaxBelow(
+        NvRmClockDivider_Fractional_2, FreqKHz, PllM1KHz, &PllM1KHz);
+
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllC0);
+    PllC1KHz = SclkKHz;
+    divc1 = NvRmPrivFindFreqMaxBelow(
+        NvRmClockDivider_Fractional_2, FreqKHz, PllC1KHz, &PllC1KHz);
+
+    // Now configure secondary dividers and select the output with highest
+    // frequency as a source for the system bus clock.
+    SclkKHz = NV_MAX(PllC1KHz, NV_MAX(PllM1KHz, PllP2KHz));
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP2)->pInfo.pDivider,
+        divp2);
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllM1)->pInfo.pDivider,
+        divm1);
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllC1)->pInfo.pDivider,
+        divc1);
+    if (SclkKHz == PllP2KHz)
+    {
+        NvRmPrivCoreClockSet(hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore,
+            NvRmClockSource_PllP2, 0, 0);
+    }
+    else if (SclkKHz == PllM1KHz)
+    {
+        NvRmPrivCoreClockSet(hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore,
+            NvRmClockSource_PllM1, 0, 0);
+    }
+    else
+    {
+        NvRmPrivCoreClockSet(hRmDevice,
+            NvRmPrivGetClockSourceHandle(NvRmClockSource_SystemBus)->pInfo.pCore,
+            NvRmClockSource_PllC1, 0, 0);
+    }
+    NvRmPrivBusClockInit(hRmDevice, SclkKHz);
+    // Power-good timer counts in APB clocks - refresh it for the new APB rate
+    Ap20SetCpuPowerGoodDelay(
+        hRmDevice, NvRmPrivGetClockSourceFreq(NvRmClockSource_Apb));
+
+    // Set VDE maximum clock (VDE is disabled after basic reset - need to
+    // temporary enable it for configuration)
+    FreqKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Vde)->MaxKHz;
+    NvRmPowerModuleClockControl(hRmDevice, NvRmModuleID_Vde, 0, NV_TRUE);
+    Ap20VdeClockSourceFind(hRmDevice, FreqKHz, FreqKHz, &VdeSource);
+    Ap20VdeClockConfigure(hRmDevice, FreqKHz, &FreqKHz, &VdeSource);
+    NvRmPowerModuleClockControl(hRmDevice, NvRmModuleID_Vde, 0, NV_FALSE);
+
+    // Set PLLX0 and CPU clock to SoC maximum (can be done now, when core
+    // voltage is guaranteed to be nominal)
+    CpuKHz = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MaxKHz;
+    FreqKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllX0);
+    if (CpuKHz != FreqKHz)
+    {
+        NvRmPrivReConfigurePllX(hRmDevice, CpuKHz);
+    }
+    NvRmPrivCoreClockSet(hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_CpuBus)->pInfo.pCore,
+        NvRmClockSource_PllX0, 0, 0);
+
+    // Set PLLP4 fixed frequency to be used by external device(s)
+    NvRmPrivDividerSet(
+        hRmDevice,
+        NvRmPrivGetClockSourceHandle(NvRmClockSource_PllP4)->pInfo.pDivider,
+        NVRM_AP20_FIXED_PLLP4_SETTING);
+#endif
+}
+
+/*****************************************************************************/
+
+// Applies the ODM-specified SDIO input tap delay for the given SDMMC
+// instance: sets INT_FB_SEL and programs INT_FB_DLY in the module's clock
+// source register. No-op for non-SDIO modules, missing ODM data, or a zero
+// tap delay. ConfiguredFreqKHz is currently unused by this implementation.
+void
+NvRmPrivAp20SdioTapDelayConfigure(
+    NvRmDeviceHandle hRmDevice,
+    NvRmModuleID ModuleId,
+    NvU32 ClkSourceOffset,
+    NvRmFreqKHz ConfiguredFreqKHz)
+{
+    NvU32 Module   = NVRM_MODULE_ID_MODULE( ModuleId );
+    NvU32 Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+    const NvOdmQuerySdioInterfaceProperty *pSdioInterfaceProps = NULL;
+    NvU32 ClkSrcReg;
+
+    if (Module != NvRmModuleID_Sdio)
+        return;
+    pSdioInterfaceProps = NvOdmQueryGetSdioInterfaceProperty(Instance);
+    if (pSdioInterfaceProps == NULL)
+        return;
+
+    // Allow only less than 16 as tap delay (4-bit h/w field).
+    NV_ASSERT(pSdioInterfaceProps->TapDelay < 0x10);
+
+    if (pSdioInterfaceProps->TapDelay > 0)
+    {
+        // Read-modify-write: the SDMMC1 field layout applies to all instances
+        ClkSrcReg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+            ClkSourceOffset);
+
+        // CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1_0_SDMMC1_INT_FB_SEL_RANGE
+        ClkSrcReg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_SDMMC1,
+                SDMMC1_INT_FB_SEL, 1, ClkSrcReg);
+
+        // CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1_0_SDMMC1_INT_FB_DLY_RANGE
+        ClkSrcReg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_SDMMC1,
+                SDMMC1_INT_FB_DLY, pSdioInterfaceProps->TapDelay, ClkSrcReg);
+        NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+            ClkSourceOffset, ClkSrcReg);
+    }
+}
+
+/*****************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.c
new file mode 100644
index 000000000000..ceb815f944c6
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.c
@@ -0,0 +1,1308 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvrm_clocks.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_module.h"
+#include "nvrm_drf.h"
+#include "nvrm_pmu_private.h"
+#include "ap20/arclk_rst.h"
+#include "ap20/arahb_arbc.h"
+#include "ap20/arapbpm.h"
+#include "ap15/ap15rm_private.h"
+#include "ap20rm_clocks.h"
+#include "ap20/arfuse.h"
+
+
+// This list requires pre-sorted info in bond-out registers order and bond-out
+// register bit shift order (MSB-to-LSB).
+// Each entry's index equals its bit position within the concatenated
+// BOND_OUT_L/H/U registers; NVRM_DEVICE_UNKNOWN marks bits with no RM
+// module mapping.
+static const NvU32 s_Ap20BondOutTable[] =
+{
+    // BOND_OUT_L bits
+    NVRM_DEVICE_UNKNOWN, // NV_DEVID_CPU
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_MODULE_ID( NvRmModuleID_Ac97, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Rtc, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Timer, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Uart, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Uart, 1 ),
+    NVRM_MODULE_ID( NvRmPrivModuleID_Gpio, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Sdio, 1 ),
+    NVRM_MODULE_ID( NvRmModuleID_Spdif, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_I2s, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_I2c, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Nand, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Sdio, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Sdio, 3 ),
+    NVRM_MODULE_ID( NvRmModuleID_Twc, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Pwm, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_I2s, 1 ),
+    NVRM_MODULE_ID( NvRmModuleID_Epp, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Vi, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_2D, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Usb2Otg, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Isp, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_3D, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Ide, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Display, 1 ),
+    NVRM_MODULE_ID( NvRmModuleID_Display, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_GraphicsHost, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Vcp, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_CacheMemCtrl, 0 ),
+    NVRM_DEVICE_UNKNOWN, // NV_DEVID_COP_CACHE
+
+    // BOND_OUT_H bits
+    NVRM_MODULE_ID( NvRmPrivModuleID_MemoryController, 0 ),
+    NVRM_DEVICE_UNKNOWN, // NV_DEVID_AHB_DMA
+    NVRM_MODULE_ID( NvRmPrivModuleID_ApbDma, 0 ),
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_MODULE_ID( NvRmModuleID_Kbc, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_SysStatMonitor, 0 ),
+    NVRM_DEVICE_UNKNOWN, // PMC
+    NVRM_MODULE_ID( NvRmModuleID_Fuse, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_KFuse, 0 ),
+    NVRM_DEVICE_UNKNOWN, // SBC1
+    NVRM_MODULE_ID( NvRmModuleID_Nor, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Spi, 0 ),
+    NVRM_DEVICE_UNKNOWN, // SBC2
+    NVRM_MODULE_ID( NvRmModuleID_Xio, 0 ),
+    NVRM_DEVICE_UNKNOWN, // SBC3
+    NVRM_MODULE_ID( NvRmModuleID_Dvc, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Dsi, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Tvo, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Mipi, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Hdmi, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Csi, 0 ),
+    NVRM_DEVICE_UNKNOWN, // TVDAC
+    NVRM_MODULE_ID( NvRmModuleID_I2c, 1 ),
+    NVRM_MODULE_ID( NvRmModuleID_Uart, 2 ),
+    NVRM_DEVICE_UNKNOWN, // SPROM
+    NVRM_MODULE_ID( NvRmPrivModuleID_ExternalMemoryController, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Usb2Otg, 1 ),
+    NVRM_MODULE_ID( NvRmModuleID_Usb2Otg, 2 ),
+    NVRM_MODULE_ID( NvRmModuleID_Mpe, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_Vde, 0 ),
+    NVRM_MODULE_ID( NvRmModuleID_BseA, 0 ),
+    NVRM_DEVICE_UNKNOWN, // BSEV
+
+    // BOND_OUT_U bits
+    NVRM_DEVICE_UNKNOWN, // SPEEDO
+    NVRM_MODULE_ID( NvRmModuleID_Uart, 3),
+    NVRM_MODULE_ID( NvRmModuleID_Uart, 4),
+    NVRM_MODULE_ID( NvRmModuleID_I2c, 2),
+    NVRM_DEVICE_UNKNOWN, // SBC4
+    NVRM_MODULE_ID( NvRmModuleID_Sdio, 2),
+    NVRM_MODULE_ID( NvRmPrivModuleID_Pcie, 0),
+    NVRM_MODULE_ID( NvRmModuleID_OneWire, 0),
+    NVRM_DEVICE_UNKNOWN, // AFI
+    NVRM_DEVICE_UNKNOWN, // CSTIE
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_MODULE_ID( NvRmModuleID_AvpUcq, 0),
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN, // IRAMA
+    NVRM_DEVICE_UNKNOWN, // IRAMB
+    NVRM_DEVICE_UNKNOWN, // IRAMC
+    NVRM_DEVICE_UNKNOWN, // IRAMD
+    NVRM_DEVICE_UNKNOWN, // CRAM2
+    NVRM_DEVICE_UNKNOWN, // SYNC_CLOCK_DOUBLER
+    NVRM_DEVICE_UNKNOWN, // CLK_M_DOUBLER
+    NVRM_DEVICE_UNKNOWN,
+    NVRM_DEVICE_UNKNOWN, // SUS_OUT
+    NVRM_DEVICE_UNKNOWN, // DEV2_OUT
+    NVRM_DEVICE_UNKNOWN, // DEV1_OUT
+    NVRM_DEVICE_UNKNOWN,
+};
+
+// Returns the bond-out mapping table and the raw contents of the three
+// BOND_OUT_L/H/U registers (bondOut must hold at least 3 words); bits set in
+// a register mean the corresponding table entry's module is bonded out.
+void
+NvRmPrivAp20GetBondOut( NvRmDeviceHandle hDevice,
+                        const NvU32 **pTable,
+                        NvU32 *bondOut )
+{
+    *pTable = s_Ap20BondOutTable;
+    bondOut[0] = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    CLK_RST_CONTROLLER_BOND_OUT_L_0);
+    bondOut[1] = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    CLK_RST_CONTROLLER_BOND_OUT_H_0);
+    bondOut[2] = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                    CLK_RST_CONTROLLER_BOND_OUT_U_0);
+}
+
+
+// Top level AP20 clock enable register control macro.
+// Writes the CLK_ENB_<offset>_SET or _CLR register to gate/ungate a single
+// module clock; 'offset' selects the L/H/U register bank and 'field' names
+// the module's enable bit. Using the SET/CLR registers avoids a
+// read-modify-write of the enable state.
+#define CLOCK_ENABLE( rm, offset, field, Enable) \
+    do \
+    { \
+        NvU32 regaddr; \
+        NvU32 reg = 0; \
+        if (Enable == ModuleClockState_Enable) \
+        { \
+            reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_ENB_##offset##_SET, SET_CLK_ENB_##field, 1, reg); \
+            regaddr = CLK_RST_CONTROLLER_CLK_ENB_##offset##_SET_0; \
+        } \
+        else \
+        { \
+            reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_ENB_##offset##_CLR, CLR_CLK_ENB_##field, 1, reg); \
+            regaddr = CLK_RST_CONTROLLER_CLK_ENB_##offset##_CLR_0; \
+        } \
+        NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, regaddr, reg); \
+    } while (0)
+
+
+
+void
+Ap20EnableModuleClock(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState)
+{
+ // Extract module and instance from composite module id.
+ NvU32 Module = NVRM_MODULE_ID_MODULE( ModuleId );
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+
+ if (ClockState == ModuleClockState_Enable)
+ {
+ NvRmPrivConfigureClockSource(hDevice, ModuleId, NV_TRUE);
+ }
+ switch ( Module ) {
+ case NvRmModuleID_CacheMemCtrl:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ NV_ASSERT(!"AP20 doesn't have such device");
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, L, CACHE2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Vcp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, VCP, ClockState );
+ break;
+ case NvRmModuleID_GraphicsHost:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, HOST1X, ClockState );
+ break;
+ case NvRmModuleID_Display:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, L, DISP1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, L, DISP2, ClockState );
+ }
+ break;
+ case NvRmModuleID_Ide:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, IDE, ClockState );
+ break;
+ case NvRmModuleID_3D:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, 3D, ClockState );
+ break;
+ case NvRmModuleID_Isp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, ISP, ClockState );
+ break;
+ case NvRmModuleID_Usb2Otg:
+ if (Instance == 0)
+ {
+ CLOCK_ENABLE( hDevice, L, USBD, ClockState );
+ }
+ else if (Instance == 1)
+ {
+ CLOCK_ENABLE( hDevice, H, USB2, ClockState );
+ }
+ else if (Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, H, USB3, ClockState );
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid USB instance");
+ }
+ break;
+ case NvRmModuleID_2D:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, 2D, ClockState );
+ break;
+ case NvRmModuleID_Epp:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, EPP, ClockState );
+ break;
+ case NvRmModuleID_Vi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, VI, ClockState );
+ break;
+ case NvRmModuleID_I2s:
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, L, I2S1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, L, I2S2, ClockState );
+ } else
+ {
+ NV_ASSERT(!"Invalid I2S instance");
+ }
+ break;
+ case NvRmModuleID_Twc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, TWC, ClockState );
+ break;
+ case NvRmModuleID_Pwm:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, PWM, ClockState );
+ break;
+ case NvRmModuleID_Sdio:
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, L, SDMMC1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, L, SDMMC2, ClockState );
+ } else if (Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, U, SDMMC3, ClockState );
+ } else if (Instance == 3)
+ {
+ CLOCK_ENABLE( hDevice, L, SDMMC4, ClockState );
+ } else
+ {
+ NV_ASSERT(!"Invalid SDIO instance");
+ }
+ break;
+ case NvRmModuleID_Spdif:
+ NV_ASSERT( Instance < 1 );
+ CLOCK_ENABLE( hDevice, L, SPDIF, ClockState );
+ break;
+ case NvRmModuleID_Nand:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, NDFLASH, ClockState );
+ break;
+ case NvRmModuleID_I2c:
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, L, I2C1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, H, I2C2, ClockState );
+ } else if (Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, U, I2C3, ClockState );
+ } else
+ {
+ NV_ASSERT(!"Invalid I2C instance");
+ }
+ break;
+ case NvRmPrivModuleID_Gpio:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, GPIO, ClockState );
+ break;
+ case NvRmModuleID_Uart:
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, L, UARTA, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, L, UARTB, ClockState );
+ }
+ else if ( Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, H, UARTC, ClockState );
+ } else if (Instance == 3)
+ {
+ CLOCK_ENABLE( hDevice, U, UARTD, ClockState );
+ } else if ( Instance == 4)
+ {
+ CLOCK_ENABLE( hDevice, U, UARTE, ClockState );
+ } else
+ {
+ NV_ASSERT(!"Invlaid UART instance");
+ }
+ break;
+ case NvRmModuleID_Vfir:
+ // Same as UARTB
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, UARTB, ClockState );
+ break;
+ case NvRmModuleID_Ac97:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, AC97, ClockState );
+ break;
+ case NvRmModuleID_Rtc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, RTC, ClockState );
+ break;
+ case NvRmModuleID_Timer:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, TMR, ClockState );
+ break;
+ case NvRmModuleID_BseA:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, BSEA, ClockState );
+ break;
+ case NvRmModuleID_Vde:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, VDE, ClockState );
+ CLOCK_ENABLE( hDevice, H, BSEV, ClockState );
+ break;
+ case NvRmModuleID_Mpe:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, MPE, ClockState );
+ break;
+ case NvRmModuleID_Tvo:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, TVO, ClockState );
+ CLOCK_ENABLE( hDevice, H, TVDAC, ClockState );
+ break;
+ case NvRmModuleID_Csi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, CSI, ClockState );
+ break;
+ case NvRmModuleID_Hdmi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, HDMI, ClockState );
+ break;
+ case NvRmModuleID_Mipi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, MIPI, ClockState );
+ break;
+ case NvRmModuleID_Dsi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, DSI, ClockState );
+ break;
+ case NvRmModuleID_Xio:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, XIO, ClockState );
+ break;
+ case NvRmModuleID_Spi:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, SPI1, ClockState );
+ break;
+ case NvRmModuleID_Fuse:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, FUSE, ClockState );
+ break;
+ case NvRmModuleID_KFuse:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, KFUSE, ClockState );
+ break;
+ case NvRmModuleID_Slink:
+ // Supporting only the slink controller.
+ NV_ASSERT( Instance < 4 );
+ if( Instance == 0 )
+ {
+ CLOCK_ENABLE( hDevice, H, SBC1, ClockState );
+ }
+ else if( Instance == 1 )
+ {
+ CLOCK_ENABLE( hDevice, H, SBC2, ClockState );
+ }
+ else if ( Instance == 2)
+ {
+ CLOCK_ENABLE( hDevice, H, SBC3, ClockState );
+ }
+ else if ( Instance == 3)
+ {
+ CLOCK_ENABLE( hDevice, U, SBC4, ClockState );
+ }
+ break;
+ case NvRmModuleID_Dvc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, DVC_I2C, ClockState );
+ break;
+ case NvRmModuleID_Pmif:
+ NV_ASSERT( Instance == 0 );
+ // PMC clock must not be disabled
+ if (ClockState == ModuleClockState_Enable)
+ CLOCK_ENABLE( hDevice, H, PMC, ClockState );
+ break;
+ case NvRmModuleID_SysStatMonitor:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, STAT_MON, ClockState );
+ break;
+ case NvRmModuleID_Kbc:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, KBC, ClockState );
+ break;
+ case NvRmPrivModuleID_ApbDma:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, APBDMA, ClockState );
+ break;
+ case NvRmPrivModuleID_MemoryController:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, MEM, ClockState );
+ break;
+ case NvRmPrivModuleID_ExternalMemoryController:
+ {
+ // FIXME: should this be allowed?
+ NvU32 reg;
+
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, EMC, ClockState );
+
+ reg = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC, EMC_2X_CLK_ENB, 1, reg);
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_EMC, EMC_1X_CLK_ENB, 1, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0, reg);
+ }
+ break;
+ case NvRmModuleID_Cpu:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, L, CPU, ClockState );
+ break ;
+ case NvRmModuleID_SyncNor:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, H, SNOR, ClockState );
+ break;
+ case NvRmModuleID_AvpUcq:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, U, AVPUCQ, ClockState );
+ break;
+ case NvRmModuleID_OneWire:
+ NV_ASSERT( Instance == 0 );
+ CLOCK_ENABLE( hDevice, U, OWR, ClockState );
+ break;
+ case NvRmPrivModuleID_Pcie:
+ NV_ASSERT( Instance == 0 );
+ // Keep in sync both PCIE wrapper (AFI) and core clocks
+ CLOCK_ENABLE( hDevice, U, PCIE, ClockState );
+ CLOCK_ENABLE( hDevice, U, AFI, ClockState );
+ break;
+
+ default:
+ NV_ASSERT(!" Unknown NvRmModuleID passed to Ap20EnableModuleClock(). ");
+ }
+
+ if (ClockState == ModuleClockState_Disable)
+ {
+ NvRmPrivConfigureClockSource(hDevice, ModuleId, NV_FALSE);
+ }
+}
+
+/**
+ * Enables or disables the TVDAC clock alone. Note that the
+ * NvRmModuleID_Tvo case in Ap20EnableModuleClock() gates TVO and TVDAC
+ * together; this entry point controls only the TVDAC branch.
+ *
+ * @param hDevice    RM device handle used for clock register access.
+ * @param ClockState Target state (ModuleClockState_Enable/_Disable).
+ */
+void
+Ap20EnableTvDacClock(
+ NvRmDeviceHandle hDevice,
+ ModuleClockState ClockState)
+{
+ CLOCK_ENABLE( hDevice, H, TVDAC, ClockState );
+}
+
+/*****************************************************************************/
+
+/**
+ * Sets the PMU interrupt polarity bit in the PMC control register:
+ * INTR_POLARITY is programmed to 1 for active-low, 0 otherwise.
+ *
+ * @param hRmDevice RM device handle used for PMC register access.
+ * @param Polarity  ODM-specified interrupt polarity.
+ */
+void
+NvRmPrivAp20SetPmuIrqPolarity(
+ NvRmDeviceHandle hRmDevice,
+ NvOdmInterruptPolarity Polarity)
+{
+ NvU32 value = (Polarity == NvOdmInterruptPolarity_Low) ? 1 : 0;
+
+ // PMU interrupt polarity is set via PMC control register. OS kernel access
+ // to this register is limited to single thread env. On RM level this r-m-w
+ // is protected by RmOpen() serialization.
+ NvU32 reg = NV_REGR(hRmDevice, NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0);
+ reg = NV_FLD_SET_DRF_NUM(APBDEV_PMC, CNTRL, INTR_POLARITY, value, reg);
+ NV_REGW(hRmDevice, NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0, reg);
+}
+
+// KBC reset is available in the pmc control register.
+// Asserts KBC_RST via PMC; then, unless reset is being held, waits 'delay'
+// microseconds and de-asserts it.
+// NOTE(review): 'hold' is not a macro parameter - it is captured from the
+// expansion site's scope (the AP20ModuleReset() argument). Confirm this
+// implicit dependency is intentional before reusing the macro elsewhere.
+#define RESET_KBC( rm, delay ) \
+ do { \
+ NvU32 reg; \
+ reg = NV_REGR((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, KBC_RST, ENABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0, reg); \
+ if (hold) \
+ {\
+ break; \
+ }\
+ NvOsWaitUS(delay); \
+ reg = NV_REGR((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, KBC_RST, DISABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0, reg); \
+ } while( 0 )
+
+// Use PMC control to reset the entire SoC. Just wait forever after reset is
+// issued - h/w would auto-clear it and restart SoC.
+// The trailing for(;;) intentionally never terminates: execution on this
+// code path ends once MAIN_RST is written.
+#define RESET_SOC( rm ) \
+ do { \
+ NvU32 reg; \
+ reg = NV_REGR((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0); \
+ reg = NV_FLD_SET_DRF_DEF(APBDEV_PMC, CNTRL, MAIN_RST, ENABLE, reg); \
+ NV_REGW((rm), NvRmModuleID_Pmif, 0, APBDEV_PMC_CNTRL_0, reg); \
+ for (;;) ; \
+ } while( 0 )
+
+/**
+ * Asserts the hardware reset for the given module instance via the
+ * CLK_RST controller RST_DEV_*_SET register and - unless 'hold' is
+ * NV_TRUE - de-asserts it via RST_DEV_*_CLR after NVRM_RESET_DELAY.
+ * KBC and full-SoC resets go through the PMC instead (see RESET_KBC /
+ * RESET_SOC); VDE and BSEV are reset together in a single write.
+ *
+ * @param hDevice  RM device handle used for register access.
+ * @param ModuleId Composite module id (module + instance).
+ * @param hold     NV_TRUE to leave the module held in reset.
+ */
+void AP20ModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold)
+{
+ // Extract module and instance from composite module id.
+ NvU32 Module = NVRM_MODULE_ID_MODULE( ModuleId );
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+
+ // Note that VDE has different reset sequence requirement
+ // FIXME: NV blocks - hot reset issues
+ #define RESET( rm, offset, field, delay ) \
+ do { \
+ NvU32 reg; \
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_##offset##_SET, SET_##field##_RST, 1); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_RST_DEV_##offset##_SET_0, reg); \
+ if (hold) \
+ { \
+ break; \
+ } \
+ NvOsWaitUS( (delay) ); \
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_##offset##_CLR, CLR_##field##_RST, 1); \
+ NV_REGW((rm), NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_RST_DEV_##offset##_CLR_0, reg); \
+ } while( 0 )
+
+
+ switch( Module ) {
+ case NvRmPrivModuleID_MemoryController:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, MEM, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Kbc:
+ NV_ASSERT( Instance == 0 );
+ RESET_KBC(hDevice, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_SysStatMonitor:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, STAT_MON, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Pmif:
+ NV_ASSERT( Instance == 0 );
+ NV_ASSERT(!"PMC reset is not allowed, and does nothing on AP20");
+ break;
+ case NvRmModuleID_Fuse:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, FUSE, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_KFuse:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, KFUSE, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Slink:
+ // Supporting only the slink controller.
+ NV_ASSERT( Instance < 4 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, H, SBC1, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, H, SBC2, NVRM_RESET_DELAY );
+ }
+ else if (Instance == 2)
+ {
+ RESET( hDevice, H, SBC3, NVRM_RESET_DELAY );
+ } else if (Instance == 3)
+ {
+ RESET( hDevice, U, SBC4, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Spi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, SPI1, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Xio:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, XIO, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Dvc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, DVC_I2C, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Dsi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, DSI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Tvo:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, TVO, NVRM_RESET_DELAY );
+ RESET( hDevice, H, TVDAC, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Mipi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, MIPI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Hdmi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, HDMI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Csi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, CSI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_I2c:
+ if( Instance == 0 )
+ {
+ RESET( hDevice, L, I2C1, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, H, I2C2, NVRM_RESET_DELAY );
+ } else if (Instance == 2)
+ {
+ RESET( hDevice, U, I2C3, NVRM_RESET_DELAY );
+ } else
+ {
+ NV_ASSERT(!"Invalid I2C instance");
+ }
+ break;
+ case NvRmModuleID_Mpe:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, MPE, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Vde:
+ NV_ASSERT( Instance == 0 );
+ {
+ // VDE and BSEV must be reset together in one SET/CLR write pair.
+ NvU32 reg;
+
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_H_SET,
+ SET_VDE_RST, 1)
+ | NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_H_SET,
+ SET_BSEV_RST, 1);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEV_H_SET_0, reg);
+
+ if (hold)
+ {
+ break;
+ }
+ NvOsWaitUS( NVRM_RESET_DELAY );
+
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_H_CLR,
+ CLR_VDE_RST, 1)
+ | NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_H_CLR,
+ CLR_BSEV_RST, 1);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEV_H_CLR_0, reg);
+ }
+ break;
+ case NvRmModuleID_BseA:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, BSEA, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Cpu:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, CPU, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Avp:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, COP, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_System:
+ /* THIS WILL DO A FULL SYSTEM RESET */
+ NV_ASSERT( Instance == 0 );
+ RESET_SOC(hDevice);
+ break;
+ case NvRmModuleID_Ac97:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, AC97, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Rtc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, RTC, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Timer:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, TMR, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Uart:
+ if( Instance == 0 )
+ {
+ RESET( hDevice, L, UARTA, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, L, UARTB, NVRM_RESET_DELAY );
+ }
+ else if ( Instance == 2)
+ {
+ RESET( hDevice, H, UARTC, NVRM_RESET_DELAY );
+ } else if (Instance == 3)
+ {
+ RESET( hDevice, U, UARTD, NVRM_RESET_DELAY );
+ } else if (Instance == 4)
+ {
+ RESET( hDevice, U, UARTE, NVRM_RESET_DELAY );
+ } else
+ {
+ NV_ASSERT(!"Invalid UART instance");
+ }
+ break;
+ case NvRmModuleID_Vfir:
+ // Same as UARTB
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, UARTB, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Sdio:
+ if( Instance == 0 )
+ {
+ RESET( hDevice, L, SDMMC1, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, L, SDMMC2, NVRM_RESET_DELAY );
+ } else if (Instance == 2)
+ {
+ RESET( hDevice, U, SDMMC3, NVRM_RESET_DELAY );
+ } else if (Instance == 3)
+ {
+ RESET( hDevice, L, SDMMC4, NVRM_RESET_DELAY );
+ } else
+ {
+ NV_ASSERT(!"Invalid SDIO instance");
+ }
+ break;
+ case NvRmModuleID_Spdif:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, SPDIF, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_I2s:
+ if( Instance == 0 )
+ {
+ RESET( hDevice, L, I2S1, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, L, I2S2, NVRM_RESET_DELAY );
+ } else
+ {
+ NV_ASSERT(!"Invalid I2S instance");
+ }
+ break;
+ case NvRmModuleID_Nand:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, NDFLASH, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Twc:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, TWC, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Pwm:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, PWM, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Epp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, EPP, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Vi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, VI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_3D:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, 3D, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_2D:
+ NV_ASSERT( Instance == 0 );
+ // RESET( hDevice, L, 2D, NVRM_RESET_DELAY );
+ // WAR for bug 364497, see also NvRmPrivAP20Reset2D()
+ NV_ASSERT(!"2D reset after RM open is no longer allowed");
+ break;
+ case NvRmModuleID_Usb2Otg:
+ if (Instance == 0)
+ {
+ RESET( hDevice, L, USBD, NVRM_RESET_DELAY );
+ } else if (Instance == 1)
+ {
+ RESET( hDevice, H, USB2, NVRM_RESET_DELAY );
+ } else if (Instance == 2)
+ {
+ RESET( hDevice, H, USB3, NVRM_RESET_DELAY );
+ } else
+ {
+ NV_ASSERT(!"Invalid USB instance");
+ }
+ break;
+ case NvRmModuleID_Isp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, ISP, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Ide:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, IDE, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_Display:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ RESET( hDevice, L, DISP1, NVRM_RESET_DELAY );
+ }
+ else if( Instance == 1 )
+ {
+ RESET( hDevice, L, DISP2, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmModuleID_Vcp:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, VCP, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_CacheMemCtrl:
+ NV_ASSERT( Instance < 2 );
+ if( Instance == 0 )
+ {
+ NV_ASSERT(!"There is no such module on AP20");
+ }
+ else if ( Instance == 1 )
+ {
+ RESET( hDevice, L, CACHE2, NVRM_RESET_DELAY );
+ }
+ break;
+ case NvRmPrivModuleID_ApbDma:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, APBDMA, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_Gpio:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, GPIO, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_GraphicsHost:
+ // FIXME: should this be allowed?
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, L, HOST1X, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_PcieXclk:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, U, PCIEXCLK, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_Pcie:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, U, PCIE, NVRM_RESET_DELAY );
+ break;
+ case NvRmPrivModuleID_Afi:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, U, AFI, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_SyncNor:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, H, SNOR, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_AvpUcq:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, U, AVPUCQ, NVRM_RESET_DELAY );
+ break;
+ case NvRmModuleID_OneWire:
+ NV_ASSERT( Instance == 0 );
+ RESET( hDevice, U, OWR, NVRM_RESET_DELAY );
+ break;
+
+ default:
+ NV_ASSERT(!"Invalid ModuleId");
+ }
+
+ #undef RESET
+}
+
+// Programs FUSE_RESERVED_PRODUCTION_0 to 0x3 through the fuse-bypass path,
+// enabling content protection features: bit 0 = macrovision, bit 1 = hdcp.
+// The register write ordering below is required for the bypass to work.
+static void
+NvRmPrivContentProtectionFuses( NvRmDeviceHandle hRm )
+{
+ NvU32 reg;
+ NvU32 clk_rst;
+
+ /* need to set FUSE_RESERVED_PRODUCTION_0 to 0x3,
+ * enable the bypass and write access
+ *
+ * bit 0: macrovision
+ * bit 1: hdcp
+ */
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ Ap20EnableModuleClock(hRm, NvRmModuleID_Fuse, NV_TRUE);
+#endif
+
+ /**
+ * This order is IMPORTANT. Fuse bypass doesn't seem to work with
+ * different ordering.
+ */
+
+ // Make fuse registers visible before touching them.
+ clk_rst = NV_REGR( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0 );
+ clk_rst = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 1, clk_rst );
+ NV_REGW( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, clk_rst );
+
+ // 1) enable fuse bypass, 2) enable s/w write access, 3) write the value.
+ reg = NV_REGR( hRm, NvRmModuleID_Fuse, 0, FUSE_FUSEBYPASS_0);
+ reg = NV_FLD_SET_DRF_DEF( FUSE, FUSEBYPASS, FUSEBYPASS_VAL, ENABLED, reg );
+ NV_REGW( hRm, NvRmModuleID_Fuse, 0, FUSE_FUSEBYPASS_0, reg );
+
+ reg = NV_REGR( hRm, NvRmModuleID_Fuse, 0, FUSE_WRITE_ACCESS_SW_0);
+ reg = NV_FLD_SET_DRF_DEF( FUSE, WRITE_ACCESS_SW, WRITE_ACCESS_SW_CTRL,
+ READWRITE, reg);
+ NV_REGW( hRm, NvRmModuleID_Fuse, 0, FUSE_WRITE_ACCESS_SW_0, reg );
+
+ reg = NV_REGR( hRm, NvRmModuleID_Fuse, 0, FUSE_RESERVED_PRODUCTION_0);
+ reg = NV_FLD_SET_DRF_NUM( FUSE, RESERVED_PRODUCTION,
+ RESERVED_PRODUCTION, 0x3, reg );
+ NV_REGW( hRm, NvRmModuleID_Fuse, 0, FUSE_RESERVED_PRODUCTION_0, reg );
+
+ // Hide fuse registers again when done.
+ clk_rst = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 0, clk_rst );
+ NV_REGW( hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, clk_rst );
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ Ap20EnableModuleClock(hRm, NvRmModuleID_Fuse, NV_FALSE);
+#endif
+}
+
+// Safe PLLM (max 1000MHz) divider for GPU modules; used as the CLK_DIVISOR
+// value when graphics clocks are put on PLLM_OUT0 during basic reset.
+#define NVRM_SAFE_GPU_DIVIDER (10)
+
+/**
+ * One-time 2D reset performed during RM initialization (WAR for bug
+ * 364497). Disables the VI clock, cycles 2D through reset on a safe
+ * PLLM-based clock, and leaves both VI and 2D clocks disabled on exit.
+ */
+void
+NvRmPrivAp20Reset2D(NvRmDeviceHandle hRmDevice)
+{
+#if !NV_OAL
+ NvU32 reg, offset;
+ /*
+ * WAR for bug 364497: 2D can not be taken out of reset if VI clock is
+ * running. Therefore, make sure VI clock is disabled and reset 2D here
+ * during RM initialization.
+ */
+ Ap20EnableModuleClock(hRmDevice, NvRmModuleID_Vi,
+ ModuleClockState_Disable);
+
+ // Assert reset to 2D module
+ offset = CLK_RST_CONTROLLER_RST_DEV_L_SET_0;
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_L_SET, SET_2D_RST, 1);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+
+ // Enable "known good" configuration for 2D clock (PLLM as a source)
+ offset = CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0;
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset,
+ (NV_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_G2D, G2D_CLK_DIVISOR,
+ NVRM_SAFE_GPU_DIVIDER) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_G2D, G2D_CLK_SRC,
+ PLLM_OUT0))
+ );
+ Ap20EnableModuleClock(hRmDevice, NvRmModuleID_2D, ModuleClockState_Enable);
+ NvOsWaitUS(NVRM_RESET_DELAY);
+
+ // Take 2D out of reset and disable 2D clock. Both VI and 2D clocks are
+ // left disabled - it is up to the respective drivers to configure and
+ // enable them later.
+ offset = CLK_RST_CONTROLLER_RST_DEV_L_CLR_0;
+ reg = NV_DRF_NUM(CLK_RST_CONTROLLER, RST_DEV_L_CLR, CLR_2D_RST, 1);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, reg);
+ Ap20EnableModuleClock(hRmDevice, NvRmModuleID_2D,
+ ModuleClockState_Disable);
+#endif
+}
+
+// Switches the given module's clock source to SrcDef with divisor DivNum.
+// The divisor is raised (if needed) BEFORE the source switch and lowered
+// only AFTER it, with a settle delay between writes - presumably so the
+// module clock never overshoots the target rate mid-transition (confirm
+// against the CLK_RST programming guidelines). Expects 'rm' and 'reg' in
+// scope at the expansion site.
+#define NVRM_CONFIG_CLOCK(Module, SrcDef, DivNum) \
+do\
+{\
+ reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0); \
+ if ((DivNum) > NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, reg)) \
+ {\
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, (DivNum), reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY); \
+ }\
+ reg = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_SRC, SrcDef, reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY); \
+ if ((DivNum) < NV_DRF_VAL(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, reg))\
+ {\
+ reg = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_##Module, \
+ Module##_CLK_DIVISOR, (DivNum), reg); \
+ NV_REGW(rm, NvRmPrivModuleID_ClockAndReset, 0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##Module##_0, reg); \
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY);\
+ }\
+} while(0)
+
+/**
+ * Brings the AP20 SoC out of boot state: enables all module clocks,
+ * configures safe clock sources for modules whose reset defaults are out
+ * of range, takes all modules out of reset, restores the boot-time clock
+ * enable state (keeping Host1x on and VDE/BSEV/BSEA off), and programs
+ * the content protection fuses. Compiled out when NV_OAL is set.
+ */
+void
+NvRmPrivAp20BasicReset( NvRmDeviceHandle rm )
+{
+#if !NV_OAL
+ NvU32 reg, ClkOutL, ClkOutH, ClkOutU;
+ ExecPlatform env;
+
+ if (NvRmIsSimulation())
+ {
+ /* the memory system can't be used until the mem_init_done bit has
+ * been set. This is done by the bootrom for production systems.
+ */
+ reg = NV_REGR( rm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_ARBITRATION_XBAR_CTRL_0 );
+ reg = NV_FLD_SET_DRF_DEF( AHB_ARBITRATION, XBAR_CTRL, MEM_INIT_DONE,
+ DONE, reg );
+ NV_REGW( rm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_ARBITRATION_XBAR_CTRL_0, reg );
+ }
+
+ // FIXME: this takes the Big Hammer Approach. Take everything out
+ // of reset and enable all of the clocks. Then keep enabled only boot
+ // clocks and graphics host.
+
+ // save boot clock enable state
+ ClkOutL = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0);
+ ClkOutH = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0);
+ ClkOutU = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0);
+
+ // Enable module clocks
+ // (for U register module clocks are in the low word only)
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_L_SET_0, 0xFFFFFFFF );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_H_SET_0, 0xFFFFFFFF );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_U_SET_0, 0x0000FFFF );
+
+ // For AP20 default clock source selection is out of range for some modules
+ // Just configure safe clocks so that reset is propagated correctly
+ env = NvRmPrivGetExecPlatform(rm);
+ if (env == ExecPlatform_Soc)
+ {
+ /*
+ * For peripheral modules default clock source is oscillator, and
+ * it is safe. Special case SPDIFIN - set on PLLP_OUT0/(1+10/2)
+ * and VDE - set on PLLP_OUT0/(1+1/2)
+ */
+ reg = NV_REGR(rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ if (reg & CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD)
+ {
+ reg = NV_DRF_DEF(CLK_RST_CONTROLLER, CLK_SOURCE_SPDIF_IN,
+ SPDIFIN_CLK_SRC, PLLP_OUT0) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, CLK_SOURCE_SPDIF_IN,
+ SPDIFIN_CLK_DIVISOR, 10);
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0, reg);
+ }
+ NVRM_CONFIG_CLOCK(VDE, PLLP_OUT0, 1);
+
+ /*
+ * For graphic clocks use PLLM_OUT0 as a source, and set divider
+ * so that initial frequency is below maximum module limit
+ */
+ NVRM_CONFIG_CLOCK(HOST1X, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(EPP, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(G2D, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(G3D, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(MPE, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(VI, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+ NVRM_CONFIG_CLOCK(VI_SENSOR, PLLM_OUT0, NVRM_SAFE_GPU_DIVIDER);
+
+ /* Using 144MHz for coresight */
+ NVRM_CONFIG_CLOCK(CSITE, PLLP_OUT0, 1);
+
+ NvOsWaitUS(NVRM_RESET_DELAY);
+ }
+ // Make sure Host1x clock will be kept enabled
+ ClkOutL = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_L,
+ CLK_ENB_HOST1X, ENABLE, ClkOutL);
+ // Make sure VDE, BSEV and BSEA clocks will be kept disabled
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_VDE, DISABLE, ClkOutH);
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_BSEV, DISABLE, ClkOutH);
+ ClkOutH = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, CLK_OUT_ENB_H,
+ CLK_ENB_BSEA, DISABLE, ClkOutH);
+
+ // Take modules out of reset
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEV_L_CLR_0, 0xFFFFFFFF );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEV_H_CLR_0, 0xFFFFFFFF );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEV_U_CLR_0, 0x0000FFFF );
+
+ // restore clock enable state (= disable those clocks that
+ // were disabled on boot)
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0, ClkOutL );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0, ClkOutH );
+ NV_REGW( rm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0, ClkOutU );
+
+ /* enable hdcp and macrovision */
+ NvRmPrivContentProtectionFuses( rm );
+
+ // AP15 BasicReset() sets DRAM_CLKSTOP and DRAM_ACPD here.
+ // Should be done by BCT - removing for AP20.
+
+ // AP15 BasicReset() enables stop clock to CPU, while it is halted.
+ // Removed in AP20 as halt on dual core is actually WFE
+#endif // !NV_OAL
+}
+
+// Writes the reset default value back into the PMC I/O power detect
+// register (APBDEV_PMC_PWR_DET_VAL).
+void NvRmPrivAp20IoPowerDetectReset(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NV_REGW(hRmDeviceHandle, NvRmModuleID_Pmif, 0,
+ APBDEV_PMC_PWR_DET_VAL_0, APBDEV_PMC_PWR_DET_VAL_0_RESET_VAL);
+}
+
+/*****************************************************************************/
+
+/**
+ * Configures the oscillator doubler for the given oscillator frequency.
+ * When a delay-tap setting is known for OscKHz, programs the taps and
+ * enables the doubler; otherwise disables the doubler.
+ *
+ * @return NvSuccess when the doubler was enabled, or the error from
+ *         NvRmPrivGetOscDoublerTaps() (doubler left disabled).
+ */
+NvError
+NvRmPrivAp20OscDoublerConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz OscKHz)
+{
+ NvU32 reg, Taps;
+ NvError error = NvRmPrivGetOscDoublerTaps(hRmDevice, OscKHz, &Taps);
+
+ if (error == NvSuccess)
+ {
+ // Program delay
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PROG_DLY_CLK_0);
+ reg = NV_FLD_SET_DRF_NUM(
+ CLK_RST_CONTROLLER, PROG_DLY_CLK, CLK_D_DELCLK_SEL, Taps, reg);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PROG_DLY_CLK_0, reg);
+ // Enable doubler
+ reg = NV_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_ENB_U_SET, SET_CLK_M_DOUBLER_ENB, 1);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_U_SET_0, reg);
+ }
+ else
+ {
+ // Disable doubler
+ reg = NV_DRF_NUM(
+ CLK_RST_CONTROLLER, CLK_ENB_U_CLR, CLR_CLK_M_DOUBLER_ENB, 1);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_U_CLR_0, reg);
+ }
+ return error;
+}
+
+// DRF-style bit-range (bit 0) for the PCX_CLAMP field of PMC SCRATCH42,
+// used by NvRmPrivAp20PllEControl() to toggle the PCIE clamp.
+#define APBDEV_PMC_SCRATCH42_0_PCX_CLAMP_RANGE 0:0
+
+// Primary (oscillator) frequency in kHz required by the fixed PLLE
+// configuration programmed in NvRmPrivAp20PllEControl().
+#define NVRM_PCIE_REF_FREQUENCY (12000)
+
+/**
+ * One-shot enable of PLLE (PCIE PLL) on real silicon. Once started, the
+ * PLL is never disabled again (no run-time power management); a disable
+ * request before the first start is a no-op. Requires a 12 MHz primary
+ * frequency for the hard-coded divider configuration below.
+ */
+void NvRmPrivAp20PllEControl(NvRmDeviceHandle hRmDevice, NvBool Enable)
+{
+ // Latches the one-shot start across calls.
+ static NvBool s_Started = NV_FALSE;
+
+ NvU32 base, reg, offset;
+
+ if (NvRmPrivGetExecPlatform(hRmDevice) != ExecPlatform_Soc)
+ return;
+
+ if (NvRmPowerGetPrimaryFrequency(hRmDevice) != NVRM_PCIE_REF_FREQUENCY)
+ {
+ NV_ASSERT(!"Not supported primary frequency");
+ return;
+ }
+
+ // No run time power management for PCIE PLL - once started, it will never
+ // be disabled
+ if (s_Started || !Enable)
+ return;
+
+ s_Started = NV_TRUE;
+
+ // Set PLLE base = 0x0D18C801 (configured, but disabled)
+ offset = CLK_RST_CONTROLLER_PLLE_BASE_0;
+ base= NV_DRF_DEF(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_ENABLE_CML, DISABLE) |
+ NV_DRF_DEF(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_ENABLE, DISABLE) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_PLDIV_CML, 0x0D) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_PLDIV, 0x18) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_NDIV, 0xC8) |
+ NV_DRF_NUM(CLK_RST_CONTROLLER, PLLE_BASE, PLLE_MDIV, 0x01);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, base);
+
+ // Remove clamping (pulse PCX_CLAMP high, then low)
+ offset = APBDEV_PMC_SCRATCH42_0;
+ reg = NV_REGR(hRmDevice, NvRmModuleID_Pmif, 0, offset);
+ reg = NV_FLD_SET_DRF_NUM(APBDEV_PMC, SCRATCH42, PCX_CLAMP, 0x1, reg);
+ NV_REGW(hRmDevice, NvRmModuleID_Pmif, 0, offset, reg);
+
+ NvOsWaitUS(NVRM_CLOCK_CHANGE_DELAY); // wait > 1us
+
+ reg = NV_FLD_SET_DRF_NUM(APBDEV_PMC, SCRATCH42, PCX_CLAMP, 0x0, reg);
+ NV_REGW(hRmDevice, NvRmModuleID_Pmif, 0, offset, reg);
+
+ // Poll PLLE ready
+ // NOTE(review): unbounded busy-wait - hangs if the PLL never reports
+ // ready; consider a timeout.
+ offset = CLK_RST_CONTROLLER_PLLE_MISC_0;
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset);
+ while (!(NV_DRF_VAL(CLK_RST_CONTROLLER, PLLE_MISC, PLLE_PLL_READY, reg)))
+ {
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset);
+ }
+
+ // Set PLLE base = 0xCD18C801 (configured and enabled)
+ offset = CLK_RST_CONTROLLER_PLLE_BASE_0;
+ base = NV_FLD_SET_DRF_DEF(
+ CLK_RST_CONTROLLER, PLLE_BASE, PLLE_ENABLE_CML, ENABLE, base);
+ base = NV_FLD_SET_DRF_DEF(
+ CLK_RST_CONTROLLER, PLLE_BASE, PLLE_ENABLE, ENABLE, base);
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0, offset, base);
+
+ // use MIPI PLL delay for now - TODO: confirm or find PLLE specific
+ NvOsWaitUS(NVRM_PLL_MIPI_STABLE_DELAY_US);
+}
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.h b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.h
new file mode 100644
index 000000000000..389778a5334a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#ifndef INCLUDED_AP20RM_CLOCKS_H
+#define INCLUDED_AP20RM_CLOCKS_H
+
+#include "nvrm_clocks.h"
+#include "nvodm_query_memc.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+extern const NvRmModuleClockInfo g_Ap20ModuleClockTable[];
+extern const NvU32 g_Ap20ModuleClockTableSize;
+
+// Minimum PLLX VCO frequency for reliable operation of DCC circuit
+#define NVRM_PLLX_DCC_VCO_MIN (600000)
+
+// Default PLLC output frequency
+#define NVRM_PLLC_DEFAULT_FREQ_KHZ (600000)
+
+// Defines number of EMC frequency steps for DFS
+#define NVRM_AP20_DFS_EMC_FREQ_STEPS (8)
+
+// Defines maximum APB frequency (bug 559823)
+#define NVRM_AP20_APB_MAX_KHZ (125000)
+
+/**
+ * Defines frequency steps derived from PLLP0 fixed output to be used as System
+ * clock source frequency. The frequency is specified in kHz, and it is rounded
+ * up to the closest divider output.
+ */
+#define NVRM_AP20_PLLP_POLICY_SYSTEM_CLOCK \
+ PLLP_POLICY_ENTRY(54000) /* PLLP divider 6, output frequency 54,000kHz */ \
+ PLLP_POLICY_ENTRY(72000) /* PLLP divider 4, output frequency 72,000kHz */ \
+ PLLP_POLICY_ENTRY(108000) /* PLLP divider 2, output frequency 108,000kHz */ \
+ PLLP_POLICY_ENTRY(144000) /* PLLP divider 1, output frequency 144,000kHz */ \
+ PLLP_POLICY_ENTRY(216000) /* PLLP divider 0, output frequency 216,000kHz */
+
+/**
+ * Defines frequency steps derived from PLLP0 fixed output to be used as CPU
+ * clock source frequency. The frequency is specified in kHz, and it is rounded
+ * up to the closest divider output. On AP20 we will use only main PLLP0 output,
+ * and no divided down steps, so that PLLP_OUT4 divider output is available as
+ * a source for external devices.
+ */
+#define NVRM_AP20_PLLP_POLICY_CPU_CLOCK \
+ PLLP_POLICY_ENTRY(216000) /* PLLP divider 0, output frequency 216,000kHz */
+
+// On AP20 PLLP4 is used as 24MHz source for external devices. This setting will
+// overwrite initial PLLP4 frequency after boot/resume from LP0.
+#define NVRM_AP20_FIXED_PLLP4_SETTING (16) /* 216 / (1 + 16/2) = 24 */
+
+/**
+ * Combines EMC 2x frequency and the respective set of EMC timing parameters for
+ * pre-defined EMC configurations (DDR clock is running at EMC 1x frequency)
+ */
+typedef struct NvRmAp20EmcTimingConfigRec
+{
+ NvRmFreqKHz Emc2xKHz;
+ const NvOdmSdramControllerConfigAdv* pOdmEmcConfig;
+ NvU32 Emc2xClockSource;
+ NvU32 Emc2xDivisor;
+ NvU32 Emc2xUndividedIndex;
+ NvRmFreqKHz CpuLimitKHz;
+} NvRmAp20EmcTimingConfig;
+
+/*****************************************************************************/
+
+/**
+ * Enables/disables module clock.
+ *
+ * @param hDevice The RM device handle.
+ * @param ModuleId Combined module ID and instance of the target module.
+ * @param ClockState Target clock state.
+ */
+void
+Ap20EnableModuleClock(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState);
+
+// Separate API to control TVDAC clock independently of TVO
+// (when TVDAC is used for CRT)
+void
+Ap20EnableTvDacClock(
+ NvRmDeviceHandle hDevice,
+ ModuleClockState ClockState);
+
+/**
+ * Resets module (assert/delay/deassert reset signal) if the hold parameter is
+ * NV_FALSE. If the hold parameter is NV_TRUE, just assert the reset and return.
+ *
+ * @param hDevice The RM device handle.
+ * @param ModuleId Combined module ID and instance of the target module.
+ * @param hold To hold or release the reset.
+ */
+void
+AP20ModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold);
+
+/**
+ * Resets 2D module.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp20Reset2D(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Initializes clock source table.
+ *
+ * @return Pointer to the clock sources descriptor table.
+ */
+NvRmClockSourceInfo* NvRmPrivAp20ClockSourceTableInit(void);
+
+/**
+ * Initializes PLL references table.
+ *
+ * @param pPllReferencesTable A pointer to a pointer which this function sets
+ * to the PLL reference table base.
+ * @param pPllReferencesTableSize A pointer to a variable which this function
+ * sets to the PLL reference table size.
+ */
+void
+NvRmPrivAp20PllReferenceTableInit(
+ NvRmPllReference** pPllReferencesTable,
+ NvU32* pPllReferencesTableSize);
+
+/**
+ * Controls PLLE.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Enable Specifies if PLLE should be enabled or disabled (PLLE power
+ * management is not supported, and it is never disabled as of now).
+ */
+void NvRmPrivAp20PllEControl(NvRmDeviceHandle hRmDevice, NvBool Enable);
+
+/**
+ * Initializes configuration structures and tables for DVFS controlled clocks.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp20ScaledClockConfigInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Configures oscillator (main) clock doubler.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param OscKHz Oscillator (main) clock frequency in kHz.
+ *
+ * @return NvSuccess if the specified oscillator frequency is supported, and
+ * NvError_NotSupported, otherwise.
+ */
+NvError
+NvRmPrivAp20OscDoublerConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz OscKHz);
+
+/**
+ * Configures maximum core and memory clocks.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivAp20FastClockConfig(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Gets module frequency synchronized with EMC speed.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module The target module ID.
+ *
+ * @return Module frequency in kHz.
+ */
+NvRmFreqKHz NvRmPrivAp20GetEmcSyncFreq(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module);
+
+/**
+ * Clips EMC frequency high limit to one of the fixed DFS EMC configurations,
+ * and if necessary adjust CPU high limit respectively.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCpuHighKHz A pointer to the variable, which contains CPU frequency
+ * high limit in KHz (on entry - requested limit, on exit - clipped limit)
+ * @param pEmcHighKHz A pointer to the variable, which contains EMC frequency
+ * high limit in KHz (on entry - requested limit, on exit - clipped limit)
+ */
+void
+NvRmPrivAp20ClipCpuEmcHighLimits(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pCpuHighKHz,
+ NvRmFreqKHz* pEmcHighKHz);
+
+/**
+ * Gets frequencies of DFS controlled clocks
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pDfsKHz Output storage pointer for DFS clock frequencies structure
+ * (all frequencies returned in kHz).
+ */
+void
+NvRmPrivAp20DfsClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/**
+ * Configures DFS controlled clocks
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pMaxKHz Pointer to the DFS clock frequencies upper limits
+ * @param pDfsKHz Pointer to the target DFS frequencies structure on entry;
+ * updated with actual DFS clock frequencies on exit.
+ *
+ * @return NV_TRUE if clock configuration is completed; NV_FALSE if this
+ * function has to be called again to complete configuration.
+ */
+NvBool
+NvRmPrivAp20DfsClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsFrequencies* pMaxKHz,
+ NvRmDfsFrequencies* pDfsKHz);
+
+
+/**
+ * Configures the sdio tap delay
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module The target module ID.
+ * @param ClkSourceOffset Clock source offset.
+ * @param ConfiguredFreqKHz The configured frequency in KHz.
+ *
+ */
+void
+NvRmPrivAp20SdioTapDelayConfigure(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId,
+ NvU32 ClkSourceOffset,
+ NvRmFreqKHz ConfiguredFreqKHz);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_AP20RM_CLOCKS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks_info.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks_info.c
new file mode 100644
index 000000000000..0bca2b233664
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clocks_info.c
@@ -0,0 +1,1827 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_drf.h"
+#include "ap20rm_clocks.h"
+#include "ap20/arclk_rst.h"
+#include "nvrm_moduleids.h"
+#include "ap20/project_relocation_table.h"
+
+#define NV_COMMON_CLK_RST_FIELDS_INFO(MODULE, H_L) \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_SRC_DEFAULT_MASK, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_SRC_SHIFT, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_DIVISOR_DEFAULT_MASK, \
+ CLK_RST_CONTROLLER_CLK_SOURCE_##MODULE##_0_##MODULE##_CLK_DIVISOR_SHIFT, \
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_##H_L##_0, \
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_##H_L##_0_CLK_ENB_##MODULE##_FIELD, \
+ CLK_RST_CONTROLLER_RST_DEVICES_##H_L##_0, \
+ CLK_RST_CONTROLLER_RST_DEVICES_##H_L##_0_SWR_##MODULE##_RST_FIELD
+
+const NvRmModuleClockInfo g_Ap20ModuleClockTable[] =
+{
+ { /* Invalid module */
+ NvRmPrivModuleID_System, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ 0,0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_TRIG_SYS_RST_FIELD,
+ NvRmDiagModuleID_SystemReset
+ },
+ { /* VI controller module - VI clock */
+ NvRmModuleID_Vi, 0 , 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_0_VI_CLK_DIVISOR_SHIFT,
+
+ // Combined VI and VI sensor reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VI_RST_FIELD,
+ NvRmDiagModuleID_Vi
+ },
+ { /* VI controller module - VI sensor clock
+ * Module sub clock must immediately follow main clock
+ */
+ NvRmModuleID_Vi, 0 , 1,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VI_SENSOR_0_VI_SENSOR_CLK_DIVISOR_SHIFT,
+
+ // Combined VI and VI sensor reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VI_RST_FIELD,
+ NvRmDiagModuleID_ViSensor
+ },
+
+ { /* I2S1 controller module */
+ NvRmModuleID_I2s, 0, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2S1, L),
+ NvRmDiagModuleID_I2s
+ },
+
+ { /* I2S2 controller module */
+ NvRmModuleID_I2s, 1, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2S2, L),
+ NvRmDiagModuleID_I2s
+ },
+
+ { /* I2C1 controller module */
+ NvRmModuleID_I2c, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2C1, L),
+ NvRmDiagModuleID_I2c
+ },
+
+ { /* I2C2 controller module */
+ NvRmModuleID_I2c, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2C2, H),
+ NvRmDiagModuleID_I2c
+ },
+    { /* I2C3 controller module */
+ NvRmModuleID_I2c, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(I2C3, U),
+ NvRmDiagModuleID_I2c
+ },
+
+ { /* S/PDIF controller module - S/PDIF OUT clock */
+ NvRmModuleID_Spdif, 0, 0,
+ {
+ NvRmClockSource_PllA0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT_0_SPDIFOUT_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT_0_SPDIFOUT_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT_0_SPDIFOUT_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT_0_SPDIFOUT_CLK_DIVISOR_SHIFT,
+
+        // Combined SPDIF reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_SPDIF_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD,
+ NvRmDiagModuleID_Spdif
+ },
+ { /* S/PDIF controller module - S/PDIF IN clock
+ * Module sub clock must immediately follow main clock
+ */
+ NvRmModuleID_Spdif, 0, 1,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0_SPDIFIN_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0_SPDIFIN_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0_SPDIFIN_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_IN_0_SPDIFIN_CLK_DIVISOR_SHIFT,
+
+        // Combined SPDIF reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_SPDIF_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_SPDIF_RST_FIELD,
+ NvRmDiagModuleID_SpdifIn
+ },
+
+ { /* PWM controller module */
+ NvRmModuleID_Pwm, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_AudioSync,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(PWM, L),
+ NvRmDiagModuleID_Pwm
+ },
+
+ { /* SPI controller module */
+ NvRmModuleID_Spi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SPI1, H),
+ NvRmDiagModuleID_Spi
+ },
+
+ { /* SBC1 controller module */
+ NvRmModuleID_Slink, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC1, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SBC2 controller module */
+ NvRmModuleID_Slink, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC2, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SBC3 controller module */
+ NvRmModuleID_Slink, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC3, H),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* SBC4 controller module */
+ NvRmModuleID_Slink, 3, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SBC4, U),
+ NvRmDiagModuleID_Sbc
+ },
+
+ { /* TWC controller module */
+ NvRmModuleID_Twc, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(TWC, L),
+ NvRmDiagModuleID_Twc
+ },
+
+ { /* XIO controller module */
+ NvRmModuleID_Xio, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(XIO, H),
+ NvRmDiagModuleID_Xio
+ },
+
+ { /* IDE controller module */
+ NvRmModuleID_Ide, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(IDE, L),
+ NvRmDiagModuleID_Ide
+ },
+
+ { /* SDIO1 controller module */
+ NvRmModuleID_Sdio, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDMMC1, L),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* SDIO2 controller module */
+ NvRmModuleID_Sdio, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDMMC2, L),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* SDIO3 controller module */
+ NvRmModuleID_Sdio, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDMMC3, U),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* SDIO4 controller module */
+ NvRmModuleID_Sdio, 3, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(SDMMC4, L),
+ NvRmDiagModuleID_Sdio
+ },
+
+ { /* NAND Flash controller module */
+ NvRmModuleID_Nand, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(NDFLASH, L),
+ NvRmDiagModuleID_NandFlash
+ },
+
+ { /* MIPI BB controller module */
+ NvRmModuleID_Mipi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(MIPI, H),
+ NvRmDiagModuleID_MipiBaseband
+ },
+
+ { /* DVC controller module */
+ NvRmModuleID_Dvc, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Integer_1,
+ NV_COMMON_CLK_RST_FIELDS_INFO(DVC_I2C, H),
+ NvRmDiagModuleID_Dvc
+ },
+
+ { /* UARTA controller module */
+ NvRmModuleID_Uart, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0_UARTA_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTA_0_UARTA_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTA_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTB controller module */
+ NvRmModuleID_Uart, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0_UARTB_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTB_0_UARTB_CLK_SRC_SHIFT,
+ 0, 0,
+
+ // Combined UARTB and VFIR reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTB_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTB_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTC controller module */
+ NvRmModuleID_Uart, 2, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0_UARTC_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTC_0_UARTC_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_UARTC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_UARTC_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTD controller module */
+ NvRmModuleID_Uart, 3, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTD_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTD_0_UARTD_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTD_0_UARTD_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_CLK_ENB_UARTD_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0_SWR_UARTD_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* UARTE controller module */
+ NvRmModuleID_Uart, 4, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTE_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTE_0_UARTE_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_UARTE_0_UARTE_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_CLK_ENB_UARTE_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0_SWR_UARTE_RST_FIELD,
+ NvRmDiagModuleID_Uart
+ },
+
+ { /* VFIR controller module */
+ NvRmModuleID_Vfir, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VFIR_0_VFIR_CLK_DIVISOR_SHIFT,
+
+ // Combined UARTB and VFIR reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_UARTB_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_UARTB_RST_FIELD,
+ NvRmDiagModuleID_Vfir
+ },
+
+ { /* Host1x module */
+ NvRmModuleID_GraphicsHost, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(HOST1X, L),
+ NvRmDiagModuleID_Host1x
+ },
+
+ { /* EPP controller module */
+ NvRmModuleID_Epp, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(EPP, L),
+ NvRmDiagModuleID_Epp
+ },
+
+ { /* MPE controller module */
+ NvRmModuleID_Mpe, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(MPE, H),
+ NvRmDiagModuleID_Mpe
+ },
+
+ { /* 2D controller module */
+ NvRmModuleID_2D, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G2D_0_G2D_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_2D_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_2D_RST_FIELD,
+ NvRmDiagModuleID_2d
+ },
+
+ { /* 3D controller module */
+ NvRmModuleID_3D, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllA0
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_G3D_0_G3D_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_3D_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_3D_RST_FIELD,
+ NvRmDiagModuleID_3d
+ },
+
+ { /* Display 1 controller module */
+ NvRmModuleID_Display, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0_DISP1_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP1_0_DISP1_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_DISP1_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_DISP1_RST_FIELD,
+ NvRmDiagModuleID_Display
+ },
+
+ { /* Display 2 controller module */
+ NvRmModuleID_Display, 1, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_None,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0_DISP2_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_DISP2_0_DISP2_CLK_SRC_SHIFT,
+ 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_DISP2_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_DISP2_RST_FIELD,
+ NvRmDiagModuleID_Display
+ },
+
+ { /* TVO controller module - TVO clock */
+ NvRmModuleID_Tvo, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVO_0_TVO_CLK_DIVISOR_SHIFT,
+
+        // Combined TVO and CVE reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVO_RST_FIELD,
+ NvRmDiagModuleID_Tvo
+ },
+ { /* TVO controller module - CVE clock
+ * Module sub clocks must immediately follow main clock
+ */
+ NvRmModuleID_Tvo, 0, 1,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_CVE_0_CVE_CLK_DIVISOR_SHIFT,
+
+        // Combined TVO and CVE reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVO_RST_FIELD,
+ NvRmDiagModuleID_Cve
+ },
+ { /* TVO controller module - TVDAC clock
+ * Module sub clocks must immediately follow main clock
+ */
+ NvRmModuleID_Tvo, 0, 2,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_TVDAC_0_TVDAC_CLK_DIVISOR_SHIFT,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_TVDAC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_TVDAC_RST_FIELD,
+ NvRmDiagModuleID_Tvdac
+ },
+
+ { /* HDMI controller module */
+ NvRmModuleID_Hdmi, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllD0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(HDMI, H),
+ NvRmDiagModuleID_Hdmi
+ },
+
+ { /* VDE controller module (VDE and BSEV clocks)
+     *  These clocks should always be enabled/reset in sync. Therefore,
+ * no need for separate VDE and BSEV subclock descriptors
+ */
+ NvRmModuleID_Vde, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VDE_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VDE_0_VDE_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VDE_0_VDE_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VDE_0_VDE_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_VDE_0_VDE_CLK_DIVISOR_SHIFT,
+
+        // Combined VDE and BSEV reset and clock enable controls
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ (CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_VDE_FIELD |
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_BSEV_FIELD),
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ (CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_VDE_RST_FIELD |
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_BSEV_RST_FIELD),
+ NvRmDiagModuleID_Vde
+ },
+
+ { /* BSEA controller module */
+ NvRmModuleID_BseA, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_BSEA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_BSEA_RST_FIELD,
+ NvRmDiagModuleID_Bsea
+ },
+
+ { /* VCP controller module */
+ NvRmModuleID_Vcp, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_VCP_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_VCP_RST_FIELD,
+ NvRmDiagModuleID_Vcp
+ },
+
+ { /* Timer controller module */
+ NvRmModuleID_Timer, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_TMR_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_TMR_RST_FIELD,
+ NvRmDiagModuleID_Timer
+ },
+
+ { /* System Monitor controller module */
+ NvRmModuleID_SysStatMonitor, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_STAT_MON_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_STAT_MON_RST_FIELD,
+ NvRmDiagModuleID_StatMon
+ },
+
+ { /* GPIO controller module */
+ NvRmPrivModuleID_Gpio, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_GPIO_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_GPIO_RST_FIELD,
+ NvRmDiagModuleID_Gpio
+ },
+
+ { /* USB controller module */
+ NvRmModuleID_Usb2Otg, 0, 0,
+ {
+ NvRmClockSource_PllU0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_USBD_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_USBD_RST_FIELD,
+ NvRmDiagModuleID_Usb
+ },
+
+ { /* USB2 controller module */
+ NvRmModuleID_Usb2Otg, 1, 0,
+ {
+ NvRmClockSource_PllU0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_USB2_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_USB2_RST_FIELD,
+ NvRmDiagModuleID_Usb
+ },
+
+ { /* USB3 controller module */
+ NvRmModuleID_Usb2Otg, 2, 0,
+ {
+ NvRmClockSource_PllU0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_USB3_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_USB3_RST_FIELD,
+ NvRmDiagModuleID_Usb
+ },
+
+ { /* APB DMA controller module */
+ NvRmPrivModuleID_ApbDma, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_APBDMA_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_APBDMA_RST_FIELD,
+ NvRmDiagModuleID_ApbDma
+ },
+
+ { /* AC97 controller module */
+ NvRmModuleID_Ac97, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_AC97_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_AC97_RST_FIELD,
+ NvRmDiagModuleID_Ac97
+ },
+
+ { /* Keyboard controller module */
+ NvRmModuleID_Kbc, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_KBC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_KBC_RST_FIELD,
+ NvRmDiagModuleID_Kbc
+ },
+
+ { /* RTC controller module */
+ NvRmModuleID_Rtc, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_RTC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_RTC_RST_FIELD,
+ NvRmDiagModuleID_Rtc
+ },
+
+ { /* Fuse controller module */
+ NvRmModuleID_Fuse, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_FUSE_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_FUSE_RST_FIELD,
+ NvRmDiagModuleID_Fuse
+ },
+
+ { /* KFuse controller module */
+ NvRmModuleID_KFuse, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_KFUSE_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_KFUSE_RST_FIELD,
+ NvRmDiagModuleID_KFuse
+ },
+
+ { /* Power Management controller module */
+ NvRmModuleID_Pmif, 0, 0,
+ {
+ NvRmClockSource_Apb
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_PMC_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_PMC_RST_FIELD,
+ NvRmDiagModuleID_Pmc
+ },
+
+ { /* COP (AVP) cache controller module */
+ NvRmModuleID_CacheMemCtrl, 0, 0,
+ {
+ NvRmClockSource_SystemBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_CACHE2_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_CACHE2_RST_FIELD,
+ NvRmDiagModuleID_Cache
+ },
+
+ { /* DSI controller module */
+ NvRmModuleID_Dsi, 0, 0,
+ {
+ NvRmClockSource_PllD0
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_DSI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_DSI_RST_FIELD,
+ NvRmDiagModuleID_Dsi
+ },
+
+ { /* CSI controller module */
+ NvRmModuleID_Csi, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: find a proper clock source
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_CSI_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_CSI_RST_FIELD,
+ NvRmDiagModuleID_Csi
+ },
+
+ { /* ISP controller module */
+ NvRmModuleID_Isp, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: find a proper clock source
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_ISP_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_ISP_RST_FIELD,
+ NvRmDiagModuleID_Isp
+ },
+
+ { /* CPU module */
+ NvRmModuleID_Cpu, 0, 0,
+ {
+ NvRmClockSource_CpuBus
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_L_0_CLK_ENB_CPU_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_CPU_RST_FIELD,
+ NvRmDiagModuleID_Cpu
+ },
+
+ { /* COP (AVP) module */
+ NvRmModuleID_Avp, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: Add COP skipper source?
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ 0, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0_SWR_COP_RST_FIELD,
+ NvRmDiagModuleID_Coprocessor
+ },
+
+ {
+ NvRmModuleID_OneWire, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM,
+ },
+ NvRmClockDivider_Fractional_2,
+ NV_COMMON_CLK_RST_FIELDS_INFO(OWR,U),
+ NvRmDiagModuleID_OneWire
+ },
+
+ {
+ NvRmModuleID_SyncNor, 0, 0,
+ {
+ NvRmClockSource_PllP0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllM0,
+ NvRmClockSource_ClkM,
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_NOR_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_NOR_0_SNOR_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_NOR_0_SNOR_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_NOR_0_SNOR_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_NOR_0_SNOR_CLK_DIVISOR_SHIFT,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_SNOR_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_SNOR_RST_FIELD,
+ NvRmDiagModuleID_SyncNor
+ },
+
+ {
+ NvRmModuleID_AvpUcq, 0, 0,
+ {
+ NvRmClockSource_SystemBus // TODO: Add COP skipper source?
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_CLK_ENB_AVPUCQ_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0_SWR_AVPUCQ_RST_FIELD,
+ NvRmDiagModuleID_AvpUcq
+ },
+
+ {
+ NvRmPrivModuleID_Pcie, 0, 0,
+ {
+ NvRmClockSource_CpuBridge
+ },
+ NvRmClockDivider_None,
+ 0, 0, 0, 0, 0,
+
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_CLK_ENB_PCIE_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_U_0_SWR_PCIE_RST_FIELD,
+ NvRmDiagModuleID_Pcie
+ },
+
+ { /* Memory controller module */
+ NvRmPrivModuleID_MemoryController, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM,
+ },
+ // MC clock is the same as EMC1x domain clock
+ NvRmClockDivider_Integer_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_SHIFT,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0,
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_H_0_CLK_ENB_MEM_FIELD,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_MEM_RST_FIELD,
+ NvRmDiagModuleID_Mc
+ },
+
+ { /* External Memory controller module */
+ NvRmPrivModuleID_ExternalMemoryController, 0, 0,
+ {
+ NvRmClockSource_PllM0,
+ NvRmClockSource_PllC0,
+ NvRmClockSource_PllP0,
+ NvRmClockSource_ClkM,
+ },
+ NvRmClockDivider_Fractional_2,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_SRC_SHIFT,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_DEFAULT_MASK,
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_DIVISOR_SHIFT,
+
+ // EMC has 1x and 2x domains clock enable bits located in the source
+        // register. There is also a global clock enable bit in CLK_OUT_ENB_L_0
+ // register, which is not described here. All 3 bits are set/cleared
+ // in Ap20EnableModuleClock() function below.
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0,
+ (CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_2X_CLK_ENB_FIELD |
+ CLK_RST_CONTROLLER_CLK_SOURCE_EMC_0_EMC_1X_CLK_ENB_FIELD),
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0,
+ CLK_RST_CONTROLLER_RST_DEVICES_H_0_SWR_EMC_RST_FIELD,
+ NvRmDiagModuleID_Emc
+ }
+};
+
+// Number of entries in g_Ap20ModuleClockTable (non-static: exported to the
+// rest of the RM clock manager together with the table itself).
+NvU32 const g_Ap20ModuleClockTableSize = NV_ARRAY_SIZE(g_Ap20ModuleClockTable);
+
+/*****************************************************************************/
+/*****************************************************************************/
+// Clock sources
+
+/*
+ * Fixed (non-programmable) clock sources. Each entry lists: the source id,
+ * its parent source (Invalid when the source is a root input with no visible
+ * parent), and the enable register/field (0, 0 when the source has no gate
+ * control described here).
+ */
+static const NvRmFixedClockInfo s_Ap20FixedClockTable[] =
+{
+    {   // ClkS: root input, no parent, no gate control
+        NvRmClockSource_ClkS,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {   // ClkM: root input, no parent, no gate control
+        NvRmClockSource_ClkM,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {   // ClkD: derived from ClkM, gated by the CLK_M doubler enable bit
+        NvRmClockSource_ClkD,
+        NvRmClockSource_ClkM,
+        CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+        CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_CLK_M_DOUBLER_ENB_FIELD
+    },
+
+    // External clock inputs - no parent and no gate control described here
+    {
+        NvRmClockSource_ExtSpdf,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtI2s1,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtI2s2,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtAc97,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtAudio1,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtAudio2,
+        NvRmClockSource_Invalid,
+        0, 0
+    },
+    {
+        NvRmClockSource_ExtVi,
+        NvRmClockSource_Invalid,
+        0, 0
+    }
+};
+
+static const NvU32 s_Ap20FixedClockTableSize = NV_ARRAY_SIZE(s_Ap20FixedClockTable);
+
+/*****************************************************************************/
+
+// TODO: Specify PLL ref divider in OSC control reg as PLL C, D, M, P, U source
+
+/*
+ * Notation clarification: in h/w documentation PLL base outputs (except PLLA
+ * output) are denoted as PllX_OUT0, and the secondary PLL outputs (if any)
+ * after fractional dividers are denoted as PllX_OUT1, PllX_OUT2, .... However,
+ * no h/w name is defined for the base PLLA output, and the output of the PLLA
+ * secondary divider is marked as PllA_OUT0 (not PllA_OUT1). Therefore, we use
+ * PllA1 (not PllA0) to denote base PLLA clock.
+ */
+/*
+ * PLL descriptors: source id, reference clock source, PLL type, base and
+ * misc control registers, followed by two frequency limits (presumably the
+ * min/max output range in kHz - confirm against NvRmPllClockInfo).
+ */
+static const NvRmPllClockInfo s_Ap20PllClockTable[] =
+{
+    { /* PLLA base output */
+        NvRmClockSource_PllA1,
+        // NOTE: PLLA is the only PLL referenced from PllP1, not from ClkM
+        NvRmClockSource_PllP1,
+        NvRmPllType_LP,
+        CLK_RST_CONTROLLER_PLLA_BASE_0,
+        CLK_RST_CONTROLLER_PLLA_MISC_0,
+        50000,
+        1400000
+    },
+
+    { /* PLLC base output */
+        NvRmClockSource_PllC0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_LP,
+        CLK_RST_CONTROLLER_PLLC_BASE_0,
+        CLK_RST_CONTROLLER_PLLC_MISC_0,
+        100000,
+        1400000
+    },
+
+    { /* PLLM base output */
+        NvRmClockSource_PllM0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_LP,
+        CLK_RST_CONTROLLER_PLLM_BASE_0,
+        CLK_RST_CONTROLLER_PLLM_MISC_0,
+        100000,
+        1200000
+    },
+
+    { /* PLLX base output */
+        NvRmClockSource_PllX0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_LP,
+        CLK_RST_CONTROLLER_PLLX_BASE_0,
+        CLK_RST_CONTROLLER_PLLX_MISC_0,
+        100000,
+        1400000
+    },
+
+    { /* PLLP base output */
+        NvRmClockSource_PllP0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_LP,
+        CLK_RST_CONTROLLER_PLLP_BASE_0,
+        CLK_RST_CONTROLLER_PLLP_MISC_0,
+        100000,
+        1400000
+    },
+
+    { /* PLLD base output */
+        NvRmClockSource_PllD0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_MIPI,
+        CLK_RST_CONTROLLER_PLLD_BASE_0,
+        CLK_RST_CONTROLLER_PLLD_MISC_0,
+        100000,
+        1000000
+    },
+
+    { /* PLLU base output */
+        NvRmClockSource_PllU0,
+        NvRmClockSource_ClkM,
+        NvRmPllType_UHS,
+        CLK_RST_CONTROLLER_PLLU_BASE_0,
+        CLK_RST_CONTROLLER_PLLU_MISC_0,
+        480000,
+        960000
+    }
+};
+
+static const NvU32 s_Ap20PllClockTableSize = NV_ARRAY_SIZE(s_Ap20PllClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Secondary divider descriptors: divided output source id, input source id,
+ * divider type, then the divider control register with its ratio mask/shift,
+ * the enable/reset control field mask, the field settings used to enable and
+ * to disable the output, and the fixed divider value (NVRM_VARIABLE_DIVIDER
+ * when the ratio is programmable).
+ */
+static const NvRmDividerClockInfo s_Ap20DividerClockTable[] =
+{
+    { /* PLLA0 - PLLA secondary output */
+        NvRmClockSource_PllA0,
+        NvRmClockSource_PllA1,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLA_OUT_0,
+        CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLA_OUT_0_PLLA_OUT0_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLC1 - PLLC secondary output */
+        NvRmClockSource_PllC1,
+        NvRmClockSource_PllC0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLC_OUT_0,
+        CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLC_OUT_0_PLLC_OUT1_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLM1 - PLLM secondary output */
+        NvRmClockSource_PllM1,
+        NvRmClockSource_PllM0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLM_OUT_0,
+        CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLM_OUT_0_PLLM_OUT1_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLP1 - PLLP secondary output (overridden) */
+        NvRmClockSource_PllP1,
+        NvRmClockSource_PllP0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLP_OUTA_0,
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT1_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLP2 - PLLP secondary output (overridden) */
+        NvRmClockSource_PllP2,
+        NvRmClockSource_PllP0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLP_OUTA_0,
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTA_0_PLLP_OUT2_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLP3 - PLLP secondary output (overridden) */
+        NvRmClockSource_PllP3,
+        NvRmClockSource_PllP0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLP_OUTB_0,
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT3_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* PLLP4 - PLLP secondary output (overridden) */
+        NvRmClockSource_PllP4,
+        NvRmClockSource_PllP0,
+        NvRmClockDivider_Fractional_2,
+
+        CLK_RST_CONTROLLER_PLLP_OUTB_0,
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RATIO_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RATIO_SHIFT,
+
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_FIELD |
+        CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_FIELD,
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_ENABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_SHIFT)),
+
+        ((CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_OVRRIDE_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_CLKEN_SHIFT) |
+         (CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_RESET_DISABLE <<
+          CLK_RST_CONTROLLER_PLLP_OUTB_0_PLLP_OUT4_RSTN_SHIFT)),
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* AHB bus clock divider */
+        NvRmClockSource_Ahb,
+        NvRmClockSource_SystemBus,
+        NvRmClockDivider_Integer_1,
+
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_AHB_RATE_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_AHB_RATE_SHIFT,
+
+        // HCLK_DIS is a disable bit: 0 enables the clock, 1 disables it
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_FIELD,
+        (0x0 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_SHIFT),
+        (0x1 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_HCLK_DIS_SHIFT),
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* APB bus clock divider */
+        NvRmClockSource_Apb,
+        NvRmClockSource_Ahb,
+        NvRmClockDivider_Integer_1,
+
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0,
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_APB_RATE_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_APB_RATE_SHIFT,
+
+        // PCLK_DIS is a disable bit: 0 enables the clock, 1 disables it
+        CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_FIELD,
+        (0x0 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_SHIFT),
+        (0x1 << CLK_RST_CONTROLLER_CLK_SYSTEM_RATE_0_PCLK_DIS_SHIFT),
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    { /* CPU bridge clock divider */
+        NvRmClockSource_CpuBridge,
+        NvRmClockSource_CpuBus,
+        NvRmClockDivider_Integer_1,
+
+        CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0,
+        CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0_CPU_BRIDGE_CLKDIV_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0_CPU_BRIDGE_CLKDIV_SHIFT,
+        0, 0, 0,
+
+        NVRM_VARIABLE_DIVIDER
+    },
+
+    // TODO: PLL ref divider, CLK_M input divider
+};
+
+static const NvU32 s_Ap20DividerClockTableSize = NV_ARRAY_SIZE(s_Ap20DividerClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Core (burst-policy) clock descriptors for the CPU and System buses: core
+ * source id, the selectable input sources, the burst policy register with
+ * state mask/shift, per-state source select masks and shifts (indexed by
+ * burst state; entry 0 unused), and the super clock divider register fields.
+ */
+static const NvRmCoreClockInfo s_Ap20CoreClockTable[] =
+{
+    { /* CPU bus (CCLK) */
+        NvRmClockSource_CpuBus,
+        {
+            NvRmClockSource_ClkM,
+            NvRmClockSource_PllC0,
+            NvRmClockSource_ClkS,
+            NvRmClockSource_PllM0,
+            NvRmClockSource_PllP0,
+            NvRmClockSource_PllP4,
+            NvRmClockSource_PllP3,
+            NvRmClockSource_ClkD,
+            NvRmClockSource_PllX0
+        },
+
+        CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0,
+        CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CPU_STATE_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CPU_STATE_SHIFT,
+        {
+            0,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IDLE_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_RUN_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IRQ_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_FIQ_SOURCE_DEFAULT_MASK
+
+        },
+        {
+            0,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IDLE_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_RUN_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_IRQ_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_CCLK_BURST_POLICY_0_CWAKEUP_FIQ_SOURCE_SHIFT
+        },
+
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0,
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_ENB_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_ENB_SHIFT,
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_SHIFT,
+        NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVIDEND_RANGE),
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_SHIFT,
+        NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_CCLK_DIVIDER_0_SUPER_CDIV_DIVISOR_RANGE)
+    },
+    { /* System bus (SCLK) */
+        NvRmClockSource_SystemBus,
+        {
+            NvRmClockSource_ClkM,
+            NvRmClockSource_PllC1,
+            NvRmClockSource_PllP4,
+            NvRmClockSource_PllP3,
+            NvRmClockSource_PllP2,
+            NvRmClockSource_ClkD,
+            NvRmClockSource_ClkS,
+            NvRmClockSource_PllM1,
+        },
+
+        CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0,
+        CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SYS_STATE_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SYS_STATE_SHIFT,
+        {
+            0,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IDLE_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_RUN_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IRQ_SOURCE_DEFAULT_MASK,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_FIQ_SOURCE_DEFAULT_MASK
+
+        },
+        {
+            0,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IDLE_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_RUN_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_IRQ_SOURCE_SHIFT,
+            CLK_RST_CONTROLLER_SCLK_BURST_POLICY_0_SWAKEUP_FIQ_SOURCE_SHIFT
+        },
+
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0,
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_ENB_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_ENB_SHIFT,
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_SHIFT,
+        NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVIDEND_RANGE),
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_SHIFT,
+        NV_FIELD_SIZE(CLK_RST_CONTROLLER_SUPER_SCLK_DIVIDER_0_SUPER_SDIV_DIVISOR_RANGE)
+    }
+};
+
+static const NvU32 s_Ap20CoreClockTableSize = NV_ARRAY_SIZE(s_Ap20CoreClockTable);
+
+/*****************************************************************************/
+
+/*
+ * Selector descriptors: output source id, the selectable input sources, the
+ * rate control register with its mask/shift, and the doubler enable
+ * register/field.
+ */
+static const NvRmSelectorClockInfo s_Ap20SelectorClockTable[] =
+{
+    {   // Audio sync clock selector
+        NvRmClockSource_AudioSync,
+        {
+            NvRmClockSource_ExtSpdf,
+            NvRmClockSource_ExtI2s1,
+            NvRmClockSource_ExtI2s2,
+            NvRmClockSource_ExtAc97,
+            NvRmClockSource_PllA0,
+            NvRmClockSource_ExtAudio2,
+            NvRmClockSource_ExtAudio1,
+            NvRmClockSource_ExtVi
+        },
+        CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_RATE_0,
+
+        CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_RATE_0_SYNC_CLK_RATE_DEFAULT_MASK,
+        CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_RATE_0_SYNC_CLK_RATE_SHIFT,
+
+        CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0,
+        CLK_RST_CONTROLLER_CLK_OUT_ENB_U_0_SYNC_CLK_DOUBLER_ENB_FIELD
+    },
+};
+
+static const NvU32 s_Ap20SelectorClockTableSize = NV_ARRAY_SIZE(s_Ap20SelectorClockTable);
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+// Unified clock source descriptor table, filled in at init time from the
+// per-type tables above; sized by the NvRmClockSource id space.
+static NvRmClockSourceInfo s_Ap20ClockSourceTable[NvRmClockSource_Num] = {{0}};
+
+/**
+ * Builds the combined AP20 clock source table by parsing each per-type
+ * table (Fixed, Pll, Divider, Core, Selector) into s_Ap20ClockSourceTable.
+ *
+ * @return Pointer to the first entry of the combined source table.
+ */
+NvRmClockSourceInfo* NvRmPrivAp20ClockSourceTableInit(void)
+{
+    NvRmClockSourceInfoPtr Src;
+
+// Wraps the s_Ap20<type>ClockTable in the union pointer and hands it to the
+// generic parser along with its size and type tag.
+#define PARSE_SOURCE_TABLE(type) \
+do\
+{\
+    Src.p##type = (NvRm##type##ClockInfo*)s_Ap20##type##ClockTable;\
+    NvRmPrivParseClockSources( \
+        s_Ap20ClockSourceTable, NvRmClockSource_Num, \
+        Src, s_Ap20##type##ClockTableSize, NvRmClockSourceType_##type); \
+} while(0)
+
+    // Start from a clean table on every init call
+    NvOsMemset(s_Ap20ClockSourceTable, 0, sizeof(s_Ap20ClockSourceTable));
+
+    PARSE_SOURCE_TABLE(Fixed);
+    PARSE_SOURCE_TABLE(Pll);
+    PARSE_SOURCE_TABLE(Divider);
+    PARSE_SOURCE_TABLE(Core);
+    PARSE_SOURCE_TABLE(Selector);
+
+#undef PARSE_SOURCE_TABLE
+
+    return &s_Ap20ClockSourceTable[0];
+}
+
+/*****************************************************************************/
+
+// Per-PLL module attachment flags: one NvBool per entry of
+// g_Ap20ModuleClockTable, tracking which module clocks reference each PLL.
+static NvBool s_Ap20PllM0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+static NvBool s_Ap20PllC0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+static NvBool s_Ap20PllP0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+static NvBool s_Ap20PllA0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+static NvBool s_Ap20PllD0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+static NvBool s_Ap20PllX0Clocks[NV_ARRAY_SIZE(g_Ap20ModuleClockTable)] = {0};
+
+// PLL reference bookkeeping: PLL source id, DFS stop flag, reference count,
+// attached-module flag array, external clock reference count (counts reset
+// by NvRmPrivAp20PllReferenceTableInit below).
+static NvRmPllReference s_Ap20PllReferencesTable[] =
+{
+    { NvRmClockSource_PllM0, NvRmDfsStatusFlags_StopPllM0, 0, s_Ap20PllM0Clocks, 0 },
+    { NvRmClockSource_PllC0, NvRmDfsStatusFlags_StopPllC0, 0, s_Ap20PllC0Clocks, 0 },
+    { NvRmClockSource_PllP0, NvRmDfsStatusFlags_StopPllP0, 0, s_Ap20PllP0Clocks, 0 },
+    { NvRmClockSource_PllA0, NvRmDfsStatusFlags_StopPllA0, 0, s_Ap20PllA0Clocks, 0 },
+    { NvRmClockSource_PllD0, NvRmDfsStatusFlags_StopPllD0, 0, s_Ap20PllD0Clocks, 0 },
+    { NvRmClockSource_PllX0, NvRmDfsStatusFlags_StopPllX0, 0, s_Ap20PllX0Clocks, 0 },
+};
+static const NvU32 s_Ap20PllReferencesTableSize =
+    NV_ARRAY_SIZE(s_Ap20PllReferencesTable);
+
+/**
+ * Resets all PLL reference bookkeeping (attached-module flags and both
+ * reference counters) and returns the PLL references table to the caller.
+ *
+ * @param pPllReferencesTable     Receives a pointer to the table.
+ * @param pPllReferencesTableSize Receives the number of table entries.
+ */
+void
+NvRmPrivAp20PllReferenceTableInit(
+    NvRmPllReference** pPllReferencesTable,
+    NvU32* pPllReferencesTableSize)
+{
+    NvU32 idx;
+
+    for (idx = 0; idx < s_Ap20PllReferencesTableSize; idx++)
+    {
+        NvRmPllReference* pRef = &s_Ap20PllReferencesTable[idx];
+
+        // One attachment flag per module clock descriptor
+        NvOsMemset(pRef->AttachedModules, 0,
+            sizeof(NvBool) * g_Ap20ModuleClockTableSize);
+        pRef->ReferenceCnt = 0;
+        pRef->ExternalClockRefCnt = 0;
+    }
+
+    *pPllReferencesTable = s_Ap20PllReferencesTable;
+    *pPllReferencesTableSize = s_Ap20PllReferencesTableSize;
+}
+
+/*****************************************************************************/
+
+// Power Gating Ids for each Power Group specified in re-location table header
+static const NvU32 s_Ap20PowerGroupIds[] = { NV_POWERGROUP_ENUM_TABLE };
+
+/**
+ * Returns the power group id table and its size through the out parameters.
+ *
+ * @param pPowerGroupIdsTable     Receives a pointer to the id table.
+ * @param pPowerGroupIdsTableSize Receives the number of table entries.
+ */
+void
+NvRmPrivAp20PowerGroupTableInit(
+    const NvU32** pPowerGroupIdsTable,
+    NvU32* pPowerGroupIdsTableSize)
+{
+    *pPowerGroupIdsTable = s_Ap20PowerGroupIds;
+    *pPowerGroupIdsTableSize = NV_ARRAY_SIZE(s_Ap20PowerGroupIds);
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_fuse.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_fuse.c
new file mode 100644
index 000000000000..a609581c87b0
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_fuse.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: Fuse API</b>
+ *
+ * @b Description: Contains the NvRM chip unique id implementation.
+ */
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_hwintf.h"
+#include "ap20/arclk_rst.h"
+#include "ap20/arfuse.h"
+#include "ap20/ap20rm_misc_private.h"
+#include "ap20rm_clocks.h"
+
+/**
+ * Reads the 64-bit chip unique (secure JTAG) id from the AP20 fuse block.
+ *
+ * Temporarily sets the CFG_ALL_VISIBLE bit in the clock-and-reset MISC_CLK_ENB
+ * register so the protected fuse registers can be read, then restores the
+ * register to its prior value before returning.
+ *
+ * @param hDevHandle RM device handle (asserted non-NULL).
+ * @param pId        Output buffer; receives the id as a single NvU64
+ *                   (low word = SECUREID_0, high word = SECUREID_1).
+ *
+ * @return NvError_Success always (reads cannot fail once asserted).
+ */
+NvError NvRmPrivAp20ChipUniqueId(NvRmDeviceHandle hDevHandle,void* pId)
+{
+ NvU64* pOut = (NvU64*)pId; // Pointer to output buffer
+ NvU32 OldRegData; // Old register contents
+ NvU32 NewRegData; // New register contents
+
+ NV_ASSERT(hDevHandle);
+ NV_ASSERT(pId);
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ Ap20EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_TRUE);
+#endif
+ // Access to unique id is protected, so make sure all registers visible first.
+ OldRegData = NV_REGR(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
+ NewRegData = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, MISC_CLK_ENB, CFG_ALL_VISIBLE, 1, OldRegData);
+ NV_REGW(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0, NewRegData);
+
+ // Read the secure id from the fuse registers and copy to the output buffer.
+ *pOut = ((NvU64)NV_REGR(hDevHandle, (NvRmPrivModuleID)NvRmModuleID_Fuse, 0, FUSE_JTAG_SECUREID_0_0)) |
+ (((NvU64)NV_REGR(hDevHandle, (NvRmPrivModuleID)NvRmModuleID_Fuse, 0, FUSE_JTAG_SECUREID_1_0)) << 32);
+
+ // Restore the protected registers enable to the way we found it.
+ NV_REGW(hDevHandle, NvRmPrivModuleID_ClockAndReset, 0, CLK_RST_CONTROLLER_MISC_CLK_ENB_0, OldRegData);
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ Ap20EnableModuleClock(hDevHandle, NvRmModuleID_Fuse, NV_FALSE);
+#endif
+ return NvError_Success;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c
new file mode 100644
index 000000000000..0cca272fdd89
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_gart.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "ap20/armc.h"
+#include "nvrm_heap.h"
+#include "nvrm_heap_simple.h"
+#include "nvrm_hwintf.h"
+#include "ap15/ap15rm_private.h"
+#include "nvassert.h"
+#include "nvcommon.h"
+#include "nvrm_drf.h"
+
+
+/**
+ * Initialize the GART entries, and enable the GART
+ */
+
+#define GART_PAGE_SHIFT (12)
+#define GART_PAGE_SIZE (4096)
+
+extern NvBool gs_GartInited;
+extern NvRmHeapSimple gs_GartAllocator;
+extern NvU32 *gs_GartSave;
+
+/**
+ * Initializes all of the TLB entries in the GART and enables GART translations
+ * All entries are initially marked invalid.
+ *
+ * @param hDevice The RM device handle.
+ */
+static NvError
+NvRmPrivAp20InitGART(NvRmDeviceHandle hDevice);
+// Marks every GART TLB entry invalid, allocates the gs_GartSave shadow buffer
+// (one NvU32 per entry, used by suspend/resume to preserve the table), and
+// enables GART translation. Returns NvError_InsufficientMemory if the shadow
+// buffer cannot be allocated (GART is left disabled in that case).
+static NvError
+NvRmPrivAp20InitGART(NvRmDeviceHandle hDevice)
+{
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+ NvU32 reg;
+ NvU32 data;
+
+ NV_ASSERT(hDevice != NULL);
+
+ // The GART aperture size determines how many 4 KB entries exist.
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ gs_GartSave = NvOsAlloc( sizeof(NvU32) * GartEntries );
+ if ( NULL == gs_GartSave )
+ return NvError_InsufficientMemory;
+
+ // Entry data with VALID=0: used to invalidate every slot below.
+ data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
+ for (GartEntry = 0; GartEntry < GartEntries; ++GartEntry)
+ {
+ // set the address
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+ // mark the entry invalid
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
+ }
+
+ // now enable the GART
+ reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_CONFIG_0, reg);
+ return NvSuccess;
+}
+
+/**
+ * Allocates a contiguous range of GART address space and maps it to the
+ * physical pages backing hPageHandle. Lazily initializes the GART hardware
+ * on first use (under hDevice->mutex).
+ *
+ * @param hDevice       RM device handle (asserted non-NULL).
+ * @param hPageHandle   OS page allocation providing the backing pages; each
+ *                      page address must be GART_PAGE_SIZE aligned.
+ * @param NumberOfPages Number of GART_PAGE_SIZE pages to map.
+ * @param PAddr         Receives the GART-space base address of the mapping.
+ *
+ * @return NvSuccess, or the failure code from the simple-heap allocator or
+ *         from GART initialization (in which case nothing is mapped and the
+ *         allocated range is released).
+ */
+NvError
+NvRmPrivAp20HeapGartAlloc(
+    NvRmDeviceHandle hDevice,
+    NvOsPageAllocHandle hPageHandle,
+    NvU32 NumberOfPages,
+    NvRmPhysAddr *PAddr)
+{
+    NvError result = NvSuccess;
+    NvU32 reg;
+    NvU32 i, data;
+    NvU32 FirstGartPage;
+
+    NV_ASSERT(hDevice);
+    NV_ASSERT(hPageHandle);
+
+    // Carve the requested range out of the GART address-space allocator.
+    result = NvRmPrivHeapSimpleAlloc(
+        &gs_GartAllocator,
+        NumberOfPages*GART_PAGE_SIZE,
+        GART_PAGE_SIZE,
+        PAddr);
+
+    if (result != NvSuccess)
+        return result;
+
+    FirstGartPage = *PAddr;
+
+    /* Check that the GART address exists and is page aligned */
+    NV_ASSERT(FirstGartPage);
+    NV_ASSERT((FirstGartPage & (GART_PAGE_SIZE - 1)) == 0);
+
+    NvOsMutexLock(hDevice->mutex);
+
+    // FIXME: Normally we would do this at init time, but it takes an
+    // egregious amount of csim time, so it is deferred until the first
+    // allocation actually needs the GART.
+    if (gs_GartInited == NV_FALSE)
+    {
+        result = NvRmPrivAp20InitGART(hDevice);
+        if ( NvSuccess != result )
+            goto fail;
+        gs_GartInited = NV_TRUE;
+    }
+
+    for (i = 0; i < NumberOfPages; i++)
+    {
+        data = (NvU32)NvOsPageAddress(hPageHandle, i * GART_PAGE_SIZE);
+
+        /* Check that each physical address is page aligned */
+        NV_ASSERT((data & (GART_PAGE_SIZE - 1)) == 0);
+
+        // Select the GART entry for this page...
+        reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((FirstGartPage + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
+        NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+        // ...then write the valid bit plus the backing physical page frame.
+        reg =
+            NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 1) |
+            NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR, (data >> GART_PAGE_SHIFT));
+
+        NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, reg);
+        // On csim the entry must be read back to guarantee it is committed
+        // before any traffic targets this GART address.
+        (void)NV_REGR(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
+    }
+fail:
+    // Bug fix: on GART init failure the range taken from gs_GartAllocator
+    // used to leak; return it so the address space can be reused.
+    if (result != NvSuccess)
+        NvRmPrivHeapSimpleFree(&gs_GartAllocator, FirstGartPage);
+    NvOsMutexUnlock(hDevice->mutex);
+
+    return result;
+}
+
+/**
+ * Invalidates a range of GART TLB entries and returns the range to the GART
+ * address-space allocator.
+ *
+ * @param hDevice       RM device handle (asserted non-NULL).
+ * @param addr          GART-space base address previously returned by
+ *                      NvRmPrivAp20HeapGartAlloc; 0 makes this a no-op.
+ * @param NumberOfPages Number of pages in the mapping; 0 makes this a no-op.
+ *
+ * NOTE(review): unlike the alloc path, this does not take hDevice->mutex
+ * around the entry-register writes — confirm callers serialize access.
+ */
+void
+NvRmPrivAp20HeapGartFree(
+ NvRmDeviceHandle hDevice,
+ NvRmPhysAddr addr,
+ NvU32 NumberOfPages)
+{
+ NvU32 i;
+ NvU32 reg;
+ NvU32 data;
+
+ NV_ASSERT(hDevice);
+
+ if (addr && NumberOfPages)
+ {
+ // Invalidate GART page table entries
+ data = NV_DRF_NUM(MC, GART_ENTRY_DATA, GART_ENTRY_DATA_PHYS_ADDR_VALID, 0);
+ for (i = 0; i < NumberOfPages; i++)
+ {
+ // set the address
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR, ((addr + i*GART_PAGE_SIZE) >> GART_PAGE_SHIFT));
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_ADDR_0, reg);
+
+ // mark the entry invalid
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0, data);
+ }
+ NvRmPrivHeapSimpleFree(&gs_GartAllocator, addr);
+ }
+}
+
+
+/**
+ * Saves the entire GART TLB into the gs_GartSave shadow buffer ahead of a
+ * power-state transition. No-op until the GART has been lazily initialized
+ * (gs_GartInited), which is also what guarantees gs_GartSave is allocated.
+ *
+ * @param hDevice RM device handle.
+ */
+void
+NvRmPrivAp20GartSuspend(NvRmDeviceHandle hDevice)
+{
+ NvU32 reg;
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+
+ NvOsMutexLock(hDevice->mutex);
+ if (gs_GartInited == NV_TRUE)
+ {
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ // Select each entry via the address register, then read its data out.
+ for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
+ {
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
+ GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_ADDR_0, reg);
+ gs_GartSave[GartEntry] = NV_REGR(hDevice,
+ NvRmPrivModuleID_MemoryController, 0, MC_GART_ENTRY_DATA_0);
+ }
+ }
+ NvOsMutexUnlock(hDevice->mutex);
+}
+
+/**
+ * Restores the GART TLB from the gs_GartSave shadow buffer after a
+ * power-state transition and re-enables GART translation. No-op unless the
+ * GART was initialized (and hence previously suspended/saved).
+ *
+ * @param hDevice RM device handle.
+ */
+void
+NvRmPrivAp20GartResume(NvRmDeviceHandle hDevice)
+{
+ NvU32 reg;
+ NvU32 GartSize;
+ NvU32 GartEntries;
+ NvU32 GartEntry;
+
+ NvOsMutexLock(hDevice->mutex);
+ if (gs_GartInited == NV_TRUE)
+ {
+ NvRmModuleGetBaseAddress(
+ hDevice, NvRmPrivModuleID_Gart, NULL, &GartSize);
+ GartEntries = GartSize / GART_PAGE_SIZE;
+
+ // Write every saved entry back: select the slot, then write its data.
+ for (GartEntry = 0; GartEntry < GartEntries; GartEntry++)
+ {
+ reg = NV_DRF_NUM(MC, GART_ENTRY_ADDR, GART_ENTRY_ADDR_TABLE_ADDR,
+ GartEntry);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_ADDR_0, reg);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ENTRY_DATA_0, gs_GartSave[GartEntry] );
+ }
+
+ // Translation is disabled across suspend; turn the GART back on.
+ reg = NV_DRF_DEF(MC, GART_CONFIG, GART_ENABLE, ENABLE);
+ NV_REGW(hDevice, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_CONFIG_0, reg);
+
+ }
+ NvOsMutexUnlock(hDevice->mutex);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_memctrl.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_memctrl.c
new file mode 100644
index 000000000000..373422c888df
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_memctrl.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_init.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "ap20/armc.h"
+#include "ap20/arahb_arbc.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_structure.h"
+
+// Forward declarations for the MC error-monitor entry points defined below.
+NvError NvRmPrivAp20McErrorMonitorStart(NvRmDeviceHandle hRm);
+void NvRmPrivAp20McErrorMonitorStop(NvRmDeviceHandle hRm);
+void NvRmPrivAp20SetupMc(NvRmDeviceHandle hRm);
+static void McErrorIntHandler(void* args);
+// Handle of the registered MC error interrupt; NULL when monitoring is off.
+static NvOsInterruptHandle s_McInterruptHandle = NULL;
+
+/**
+ * Interrupt handler for memory-controller decode errors. Reads MC_INTSTATUS,
+ * logs the fault address/status registers for each asserted error class
+ * (security violation, EMEM decode error, invalid GART page), then clears
+ * the handled bits and signals interrupt completion.
+ *
+ * NOTE(review): the NV_ASSERT below fires unconditionally whenever this
+ * handler runs — presumably to halt debug builds on any MC error while
+ * release builds only log and clear; confirm NV_ASSERT semantics.
+ *
+ * @param args RM device handle passed at NvRmInterruptRegister time.
+ */
+void McErrorIntHandler(void* args)
+{
+ NvU32 RegVal;
+ NvU32 IntStatus;
+ NvU32 IntClear = 0;
+ NvRmDeviceHandle hRm = (NvRmDeviceHandle)args;
+
+ IntStatus = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0);
+ if ( NV_DRF_VAL(MC, INTSTATUS, SECURITY_VIOLATION_INT, IntStatus) )
+ {
+ IntClear |= NV_DRF_DEF(MC, INTSTATUS, SECURITY_VIOLATION_INT, SET);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_SECURITY_VIOLATION_ADR_0);
+ NvOsDebugPrintf("SECURITY_VIOLATION DecErrAddress=0x%x ", RegVal);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_SECURITY_VIOLATION_STATUS_0);
+ NvOsDebugPrintf("SECURITY_VIOLATION DecErrStatus=0x%x ", RegVal);
+ }
+ if ( NV_DRF_VAL(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, IntStatus) )
+ {
+ IntClear |= NV_DRF_DEF(MC, INTSTATUS, DECERR_EMEM_OTHERS_INT, SET);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_DECERR_EMEM_OTHERS_ADR_0);
+ NvOsDebugPrintf("EMEM DecErrAddress=0x%x ", RegVal);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_DECERR_EMEM_OTHERS_STATUS_0);
+ NvOsDebugPrintf("EMEM DecErrStatus=0x%x ", RegVal);
+ }
+ if ( NV_DRF_VAL(MC, INTSTATUS, INVALID_GART_PAGE_INT, IntStatus) )
+ {
+ IntClear |= NV_DRF_DEF(MC, INTSTATUS, INVALID_GART_PAGE_INT, SET);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ERROR_ADDR_0);
+ NvOsDebugPrintf("GART DecErrAddress=0x%x ", RegVal);
+ RegVal = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_GART_ERROR_REQ_0);
+ NvOsDebugPrintf("GART DecErrStatus=0x%x ", RegVal);
+ }
+
+ NV_ASSERT(!"MC Decode Error ");
+ // Clear the interrupt.
+ NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTSTATUS_0, IntClear);
+ NvRmInterruptDone(s_McInterruptHandle);
+}
+
+/**
+ * Installs the MC decode-error interrupt handler and unmasks the three MC
+ * error interrupt sources. Idempotent: does nothing if the handler is
+ * already registered (s_McInterruptHandle non-NULL).
+ *
+ * @param hRm RM device handle.
+ *
+ * @return NvSuccess, or the error from NvRmInterruptRegister
+ *         (via NV_CHECK_ERROR).
+ */
+NvError NvRmPrivAp20McErrorMonitorStart(NvRmDeviceHandle hRm)
+{
+ NvU32 val;
+ NvU32 IrqList;
+ NvError e = NvSuccess;
+ NvOsInterruptHandler handler;
+
+ if (s_McInterruptHandle == NULL)
+ {
+ // Install an interrupt handler.
+ handler = McErrorIntHandler;
+ IrqList = NvRmGetIrqForLogicalInterrupt(hRm,
+ NvRmPrivModuleID_MemoryController, 0);
+ NV_CHECK_ERROR( NvRmInterruptRegister(hRm, 1, &IrqList, &handler,
+ hRm, &s_McInterruptHandle, NV_TRUE) );
+ // Enable Dec Err interrupts in memory Controller.
+ val = NV_DRF_DEF(MC, INTMASK, SECURITY_VIOLATION_INTMASK, UNMASKED) |
+ NV_DRF_DEF(MC, INTMASK, DECERR_EMEM_OTHERS_INTMASK, UNMASKED) |
+ NV_DRF_DEF(MC, INTMASK, INVALID_GART_PAGE_INTMASK, UNMASKED);
+ NV_REGW(hRm, NvRmPrivModuleID_MemoryController, 0, MC_INTMASK_0, val);
+ }
+ return e;
+}
+
+/**
+ * Tears down MC error-interrupt monitoring: unregisters the handler that
+ * NvRmPrivAp20McErrorMonitorStart installed and clears the cached handle.
+ *
+ * @param hRm RM device handle.
+ */
+void NvRmPrivAp20McErrorMonitorStop(NvRmDeviceHandle hRm)
+{
+    NvOsInterruptHandle hInterrupt = s_McInterruptHandle;
+
+    s_McInterruptHandle = NULL;
+    NvRmInterruptUnregister(hRm, hInterrupt);
+}
+
+/* This function sets some performance timings for Mc & Emc. Numbers are from
+ * the Arch team.
+ *
+ */
+/**
+ * One-time MC/AHB performance setup for AP20: verifies the MPCore read
+ * low-latency path is enabled (asserting if not) and programs the two AHB
+ * prefetch configuration registers used for DMA/USB traffic.
+ *
+ * @param hRm RM device handle.
+ */
+void NvRmPrivAp20SetupMc(NvRmDeviceHandle hRm)
+{
+ NvU32 reg, mask;
+ reg = NV_REGR(hRm, NvRmPrivModuleID_MemoryController, 0,
+ MC_LOWLATENCY_CONFIG_0);
+ mask = NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_CTRL, ENABLE) |
+ NV_DRF_DEF(MC, LOWLATENCY_CONFIG, MPCORER_LL_SEND_BOTH, ENABLE);
+ // Sanity check only — asserts (debug builds) when either LL bit is clear.
+ if ( mask != (reg & mask) )
+ NV_ASSERT(!"MC LL Path not enabled!");
+ // For AP20, no need to program any MC timeout registers here. Default
+ // values should be good enough.
+
+ // Setup the AHB MEM configuration for USB performance.
+ // Enabling the AHB prefetch bits for USB1 USB2 and USB3.
+ // 64kiloByte boundaries
+ // 4096 cycles before prefetched data is invalidated due to inactivity.
+ reg = NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG1, ENABLE, 1) |
+ NV_DRF_DEF(AHB_AHB_MEM, PREFETCH_CFG1, AHB_MST_ID, AHBDMA)|
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG1, ADDR_BNDRY, 0xC) |
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG1, SPEC_THROTTLE, 0x0) |
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG1, INACTIVITY_TIMEOUT, 0x1000);
+ NV_REGW( hRm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_AHB_MEM_PREFETCH_CFG1_0, reg );
+
+ // NOTE(review): three AHB_MST_ID selects are OR'd into CFG2 below —
+ // presumably the field is a per-master bitmask rather than a single
+ // select value; verify against the AHB arbiter register spec.
+ reg = NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG2, ENABLE, 1) |
+ NV_DRF_DEF(AHB_AHB_MEM, PREFETCH_CFG2, AHB_MST_ID, USB)|
+ NV_DRF_DEF(AHB_AHB_MEM, PREFETCH_CFG2, AHB_MST_ID, USB2)|
+ NV_DRF_DEF(AHB_AHB_MEM, PREFETCH_CFG2, AHB_MST_ID, USB3)|
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG2, ADDR_BNDRY, 0xC) |
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG2, SPEC_THROTTLE, 0x0) |
+ NV_DRF_NUM(AHB_AHB_MEM, PREFETCH_CFG2, INACTIVITY_TIMEOUT, 0x1000);
+ NV_REGW( hRm, NvRmPrivModuleID_Ahb_Arb_Ctrl, 0,
+ AHB_AHB_MEM_PREFETCH_CFG2_0, reg );
+
+}
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_misc_private.h b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_misc_private.h
new file mode 100644
index 000000000000..9d790ef85e0f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_misc_private.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef AP20RM_MISC_PRIVATE_H
+#define AP20RM_MISC_PRIVATE_H
+
+/*
+ * ap20rm_misc_private.h declares the miscellaneous private implementation
+ * functions of the resource manager for the AP20 chip.
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+
+/**
+ * Reads the 64-bit chip unique id into *pId.
+ * NOTE(review): the original comment said "AP15 and ap16", but this header
+ * belongs to the AP20 implementation — presumed stale copy/paste; verify.
+ *
+ * @param hDevHandle RM device handle.
+ * @param pId        Output buffer receiving the id as an NvU64.
+ */
+NvError
+NvRmPrivAp20ChipUniqueId(
+ NvRmDeviceHandle hDevHandle,
+ void* pId);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // AP20RM_MISC_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_pinmux_tables.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_pinmux_tables.c
new file mode 100644
index 000000000000..312ca6d4f69f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_pinmux_tables.c
@@ -0,0 +1,1213 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "ap20/arapb_misc.h"
+#include "ap20/arclk_rst.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvrm_clocks.h"
+#include "nvodm_query_pinmux.h"
+
+// FIXME: None of the modules have reset configurations, yet. This should
+// be fixed.
+//
+// Pinmux program for each UART controller instance. Format (presumably, per
+// the CONFIG/UNCONFIG/CONFIGEND/MODULEDONE macros defined in
+// nvrm_pinmux_utils.h — verify there): an optional reset/unconfig section,
+// then one CONFIG list per ODM pinmux configuration, each terminated by
+// CONFIGEND(), with MODULEDONE() ending the instance.
+static const NvU32 g_Ap20Mux_Uart1[] = {
+ UNCONFIG(C,IRRX,UARTA,UARTB),UNCONFIG(C,IRTX,UARTA,UARTB),
+ UNCONFIG(A,UAA,UARTA,MIPI_HS),UNCONFIG(A,UAB,UARTA,MIPI_HS),
+ UNCONFIG(D,SDB,UARTA,PWM),UNCONFIG(D,SDD,UARTA,PWM),
+ CONFIGEND(),
+ CONFIG(B,A,UAA,UARTA),CONFIG(B,A,UAB,UARTA),CONFIGEND(),
+ CONFIG(A,D,GPU,UARTA),CONFIGEND(),
+ CONFIG(A,C,IRRX,UARTA),CONFIG(A,C,IRTX,UARTA),CONFIG(B,A,UAD,UARTA),CONFIGEND(),
+ CONFIG(A,C,IRRX,UARTA),CONFIG(A,C,IRTX,UARTA),CONFIGEND(),
+ CONFIG(B,D,SDD,UARTA),CONFIG(D,D,SDB,UARTA),CONFIGEND(),
+ // added as additional config for 4b mode.
+ CONFIG(B,A,UAA,UARTA),CONFIGEND(),
+ CONFIG(A,A,SDIO1,UARTA),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Uart2[] = {
+ /* disown IRDA and SPDIF chosen .*/
+ UNCONFIG(A,UAD,IRDA,SPDIF),CONFIGEND(),
+ CONFIG(A,C,IRRX,UARTB),CONFIG(A,C,IRTX,UARTB),CONFIG(B,A,UAD,IRDA),CONFIGEND(),
+ CONFIG(B,A,UAD,IRDA),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Uart3[] = {
+ UNCONFIG(B,UCA,UARTC,RSVD2),UNCONFIG(B,UCB,UARTC,RSVD4),CONFIGEND(),
+ CONFIG(B,B,UCA,UARTC),CONFIG(B,B,UCB,UARTC),CONFIGEND(),
+ CONFIG(B,B,UCA,UARTC),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Uart4[] = {
+ UNCONFIG(B,GMC,UARTD,SPI4),CONFIGEND(),
+ CONFIG(D,A,UDA,UARTD),CONFIGEND(),
+ CONFIG(A,B,GMC,UARTD),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Uart5[] = {
+ UNCONFIG(B,GMA,UARTE,SPI3),CONFIGEND(),
+ CONFIG(A,A,SDIO1,UARTE),CONFIGEND(),
+ CONFIG(A,B,GMA,UARTE),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated list of per-instance programs, indexed by UART instance.
+static const NvU32* g_Ap20MuxUart[] = {
+ &g_Ap20Mux_Uart1[0],
+ &g_Ap20Mux_Uart2[0],
+ &g_Ap20Mux_Uart3[0],
+ &g_Ap20Mux_Uart4[0],
+ &g_Ap20Mux_Uart5[0],
+ NULL,
+};
+
+
+// Pinmux programs for the four SPI controller instances (same CONFIG /
+// UNCONFIG / CONFIGEND / MODULEDONE encoding as the UART tables above).
+static const NvU32 g_Ap20Mux_Spi1[] = {
+ /* Disown UDA,SPIA,SPIB and SPIC RSVD and GMI chosen*/
+ UNCONFIG(A, UDA, SPI1, RSVD),UNCONFIG(D, SPIA, SPI1, GMI),
+ UNCONFIG(D, SPIB, SPI1, GMI),UNCONFIG(D, SPIC, SPI1, GMI),
+ CONFIGEND(),
+ CONFIG(D,A,UDA,SPI1),CONFIGEND(),
+ CONFIG(A,B,DTE,SPI1),CONFIG(A,B,DTB,SPI1),CONFIGEND(),
+ CONFIG(B,D,SPIC,SPI1),CONFIG(B,D,SPIB,SPI1),CONFIG(B,D,SPIA,SPI1),CONFIGEND(),
+ CONFIG(B,D,SPIE,SPI1),CONFIG(B,D,SPIF,SPI1),CONFIG(B,D,SPID,SPI1),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Spi2[] = {
+ // Reset config - abandon UAB, pads. MIPI_HS and GMI chosen
+ UNCONFIG(A,UAB,SPI2,MIPI_HS), UNCONFIG(D,SPID,SPI2,GMI),
+ UNCONFIG(D,SPIE,SPI2,GMI),
+ CONFIGEND(),
+ // config 1
+ CONFIG(B,A,UAB,SPI2),CONFIGEND(),
+ // config 2
+ CONFIG(B,D,SPIC,SPI2),CONFIG(B,D,SPIB,SPI2),CONFIG(B,D,SPIA,SPI2),
+ CONFIG(B,D,SPIG,SPI2),CONFIG(B,D,SPIH,SPI2),CONFIGEND(),
+ // config 3
+ CONFIG(B,D,SPIE,SPI2_ALT),CONFIG(B,D,SPIF,SPI2),CONFIG(B,D,SPID,SPI2_ALT),
+ CONFIG(B,D,SPIG,SPI2_ALT),CONFIG(B,D,SPIH,SPI2_ALT),CONFIGEND(),
+ // config 4
+ CONFIG(B,D,SPIC,SPI2),CONFIG(B,D,SPIB,SPI2),CONFIG(B,D,SPIA,SPI2),
+ CONFIGEND(),
+ // config 5
+ CONFIG(B,D,SPIE,SPI2_ALT),CONFIG(B,D,SPIF,SPI2),CONFIG(B,D,SPID,SPI2_ALT),
+ CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Spi3[] = {
+ // Reset config - abandon UAA,SPIF,SPIG,SPIH pads. MIPI_HS and SPI2_ALT chosen
+ UNCONFIG(A,UAA,SPI3,UARTA), UNCONFIG(D,SPIF,SPI3,RSVD),
+ UNCONFIG(D,SPIG,SPI3,SPI2_ALT),UNCONFIG(D,SPIH,SPI3,SPI2_ALT),
+ CONFIGEND(),
+ // Config 1
+ CONFIG(B,A,UAA,SPI3),CONFIGEND(),
+ // Config 2.
+ CONFIG(C,E,LSC1,SPI3),CONFIG(D,E,LPW2,SPI3),CONFIG(D,E,LPW0,SPI3),
+ CONFIG(C,E,LM0,SPI3),CONFIGEND(),
+ // config 3
+ CONFIG(C,E,LSCK,SPI3),CONFIG(D,E,LSDI,SPI3),CONFIG(D,E,LSDA,SPI3),
+ CONFIG(C,E,LCSN,SPI3),CONFIGEND(),
+ // config 4
+ CONFIG(A,B,GMA,SPI3),CONFIGEND(),
+ // config 5
+ CONFIG(B,D,SPIC,SPI3),CONFIG(B,D,SPIB,SPI3),CONFIG(B,D,SPIA,SPI3), CONFIGEND(),
+ // config 6
+ CONFIG(B,D,SDC,SPI3),CONFIG(B,D,SDD,SPI3), CONFIGEND(),
+
+ // config 7
+ /* -spif,spig,and spih are added as config 7 on mux: 0
+ * -spia of SPI2_MOSI as spi3_dout on mux: 2 under config 7.
+ */
+ // NOTE(review): SPIG appears twice on the next line; per the comment
+ // above the second entry was likely meant to be SPIH — verify against
+ // the AP20 pinmux specification before changing.
+ CONFIG(B,D,SPIA,SPI3),CONFIG(B,D,SPIF,SPI3),
+ CONFIG(B,D,SPIG,SPI3),CONFIG(B,D,SPIG,SPI3), CONFIGEND(),
+ MODULEDONE(),
+};
+
+
+static const NvU32 g_Ap20Mux_Spi4[] = {
+ CONFIGEND(),
+ // config 1
+ CONFIG(B,A,UAD,SPI4),CONFIG(A,C,IRRX,SPI4),CONFIG(A,C,IRTX,SPI4),CONFIGEND(),
+ // config 2
+ CONFIG(A,B,GMC,SPI4),CONFIGEND(),
+ // config 3
+ CONFIG(B,B,SLXC,SPI4),CONFIG(B,B,SLXK,SPI4),CONFIG(B,B,SLXA,SPI4),
+ CONFIG(B,B,SLXD,SPI4),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated list of per-instance programs, indexed by SPI instance.
+static const NvU32 *g_Ap20MuxSpi[] = {
+ &g_Ap20Mux_Spi1[0],
+ &g_Ap20Mux_Spi2[0],
+ &g_Ap20Mux_Spi3[0],
+ &g_Ap20Mux_Spi4[0],
+ NULL,
+};
+
+// Pinmux program for the single serial-flash controller instance.
+static const NvU32 g_Ap20Mux_Sflash[] = {
+ CONFIGEND(),
+ CONFIG(B,C,GMD,SFLASH),CONFIG(A,B,GMC,SFLASH),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxSflash[] = {
+ &g_Ap20Mux_Sflash[0],
+ NULL,
+};
+
+// Pinmux program for the TWC (three-wire controller) instance.
+static const NvU32 g_Ap20Mux_Twc[] = {
+ CONFIGEND(),
+ CONFIG(A,C,DAP2,TWC),CONFIGEND(),
+ CONFIG(B,D,SDC,TWC),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxTwc[] = {
+ &g_Ap20Mux_Twc[0],
+ NULL,
+};
+
+// Pinmux programs for the three general-purpose I2C controller instances.
+static const NvU32 g_Ap20Mux_I2c1[] = {
+ UNCONFIG(A,RM,I2C,RSVD1),CONFIGEND(),
+ // config 1
+ CONFIG(A,A,RM,I2C),CONFIGEND(),
+ // config 2
+ CONFIG(B,D,SPDI,I2C),CONFIG(B,D,SPDO,I2C),CONFIGEND(),
+ // config 3
+ CONFIG(B,D,SPIG,I2C),CONFIG(B,D,SPIH,I2C),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_I2c2[] = {
+ // reset & multiplex config
+ UNCONFIG(G,PTA,I2C2,RSVD3),UNCONFIG(C,DDC,I2C2,RSVD1),CONFIGEND(),
+ // config 1
+ CONFIG(B,C,DDC,I2C2),CONFIGEND(),
+ // config 2
+ CONFIG(A,G,PTA,I2C2),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_I2c3[] = {
+ UNCONFIG(G,DTF,I2C3,RSVD2),CONFIGEND(),
+ CONFIG(D,G,DTF,I2C3),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated list of per-instance programs, indexed by I2C instance.
+static const NvU32 *g_Ap20MuxI2c[] = {
+ &g_Ap20Mux_I2c1[0],
+ &g_Ap20Mux_I2c2[0],
+ &g_Ap20Mux_I2c3[0],
+ NULL,
+};
+
+// Pinmux program for the dedicated PMU I2C controller.
+static const NvU32 g_Ap20Mux_I2cPmu[] = {
+ UNCONFIG(C,I2CP,I2C,RSVD2),CONFIGEND(),
+ CONFIG(A,C,I2CP,I2C),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxI2cPmu[] = {
+ &g_Ap20Mux_I2cPmu[0],
+ NULL,
+};
+
+// Pinmux program for the ULPI (USB low-pin-count interface) instance.
+static const NvU32 g_Ap20Mux_Ulpi[] = {
+ CONFIGEND(),
+ CONFIG(B,A,UAA,ULPI),CONFIG(B,A,UAB,ULPI),CONFIG(D,A,UDA,ULPI),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxUlpi[] = {
+ &g_Ap20Mux_Ulpi[0],
+ NULL,
+};
+
+// Pinmux programs for the four SDIO controller instances.
+static const NvU32 g_Ap20Mux_Sdio1[] = {
+ UNCONFIG(A,SDIO1,SDIO1,RSVD1),CONFIGEND(),
+ CONFIG(A,A,SDIO1,SDIO1),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Sdio2[] = {
+ CONFIGEND(),
+ // config 1
+ CONFIG(D,G,KBCD,SDIO2),CONFIG(A,C,KBCB,SDIO2),CONFIGEND(),
+ // config 2
+ CONFIG(D,G,KBCD,SDIO2),CONFIG(A,C,KBCB,SDIO2),CONFIG(A,C,KBCA,SDIO2),CONFIGEND(),
+ // config 3
+ CONFIG(A,C,DAP1,SDIO2),CONFIG(B,D,SPDI,SDIO2),CONFIG(B,D,SPDO,SDIO2),CONFIGEND(),
+ // config 4
+ CONFIG(A,B,DTA,SDIO2),CONFIG(A,B,DTD,SDIO2),CONFIGEND(),
+ // config 5 (same pad programming as config 4)
+ CONFIG(A,B,DTA,SDIO2),CONFIG(A,B,DTD,SDIO2),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Sdio3[] = {
+ CONFIGEND(),
+ // config 1
+ CONFIG(B,D,SDD,SDIO3), CONFIG(B,D,SDC,SDIO3), CONFIG(D,D,SDB,SDIO3), CONFIG(B,B,SLXA,SDIO3),
+ CONFIG(B,B,SLXK,SDIO3),CONFIG(B,B,SLXC,SDIO3),CONFIG(B,B,SLXD,SDIO3),CONFIGEND(),
+ // config 2
+ CONFIG(B,D,SDD,SDIO3),CONFIG(B,D,SDC,SDIO3), CONFIG(D,D,SDB,SDIO3), CONFIGEND(),
+ MODULEDONE(),
+};
+
+// Sdio4 uses BRANCH/SUBROUTINESDONE: config 2 jumps into config 3's
+// program as a shared subroutine before adding its own pads.
+static const NvU32 g_Ap20Mux_Sdio4[] = {
+ CONFIGEND(),
+ // config 1
+ CONFIG(A,A,ATC,SDIO4),CONFIG(A,A,ATD,SDIO4),CONFIGEND(),
+ // config 2
+ BRANCH(3),CONFIG(B,D,GME,SDIO4),CONFIGEND(),
+ // config 3
+ CONFIG(A,A,ATB,SDIO4),CONFIG(A,B,GMA,SDIO4),CONFIGEND(),
+ MODULEDONE(),
+ SUBROUTINESDONE(),
+};
+
+// NULL-terminated list of per-instance programs, indexed by SDIO instance.
+static const NvU32 *g_Ap20MuxSdio[] = {
+ &g_Ap20Mux_Sdio1[0],
+ &g_Ap20Mux_Sdio2[0],
+ &g_Ap20Mux_Sdio3[0],
+ &g_Ap20Mux_Sdio4[0],
+ NULL,
+};
+
+// Pinmux program for the SPDIF controller instance.
+static const NvU32 g_Ap20Mux_Spdif[] = {
+ UNCONFIG(D,SPDO,SPDIF,RSVD),UNCONFIG(D,SPDI,SPDIF,RSVD),
+ UNCONFIG(B,SLXD,SPDIF,SPI4),UNCONFIG(B,SLXC,SPDIF,SPI4),
+ CONFIGEND(),
+ // config 1
+ CONFIG(B,D,SPDO,SPDIF),CONFIG(B,D,SPDI,SPDIF),CONFIGEND(),
+ // config 2
+ CONFIG(B,B,SLXD,SPDIF),CONFIG(B,B,SLXC,SPDIF),CONFIGEND(),
+ // config 3
+ CONFIG(B,A,UAD,SPDIF),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxSpdif[] = {
+ &g_Ap20Mux_Spdif[0],
+ NULL,
+};
+
+// Pinmux program for the MIPI HSI instance.
+static const NvU32 g_Ap20Mux_Hsi[] = {
+ CONFIGEND(),
+ CONFIG(B,A,UAA,MIPI_HS),CONFIG(B,A,UAB,MIPI_HS),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxHsi[] = {
+ &g_Ap20Mux_Hsi[0],
+ NULL,
+};
+
+// Pinmux program for the HDMI instance (HDINT hotplug-interrupt pad only).
+static const NvU32 g_Ap20Mux_Hdmi[] = {
+ // HDINT resets to HDMI, so move it to a reserved pin RSVD2
+ UNCONFIG(B,HDINT,HDMI,RSVD2),CONFIGEND(),
+ CONFIG(C,B,HDINT,HDMI),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxHdmi[] = {
+ &g_Ap20Mux_Hdmi[0],
+ NULL,
+};
+
+// Pinmux program for the PWM controller (six pad-combination configs).
+static const NvU32 g_Ap20Mux_Pwm[] = {
+ UNCONFIG(D,GPU,PWM,RSVD4),UNCONFIG(D,SDC,PWM,TWC),
+ CONFIGEND(),
+ // config 1
+ CONFIG(A,D,GPU,PWM),CONFIGEND(),
+ // config 2
+ CONFIG(B,B,UCB,PWM),CONFIG(B,D,SDD,PWM),CONFIGEND(),
+ // config 3
+ CONFIG(B,B,UCB,PWM),CONFIGEND(),
+ // config 4
+ CONFIG(B,D,SDD,PWM),CONFIGEND(),
+ // config 5
+ CONFIG(B,D,SDC,PWM),CONFIG(B,D,SDD,PWM),CONFIGEND(),
+ // config 6
+ CONFIG(B,D,SDC,PWM),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 *g_Ap20MuxPwm[] = {
+ &g_Ap20Mux_Pwm[0],
+ NULL,
+};
+
+// Pin-mux option table for the ATA/IDE controller: one configuration that
+// claims all ATA pad groups plus GMB.
+static const NvU32 g_Ap20Mux_Ata[] = {
+ CONFIGEND(),
+ CONFIG(A,A,ATA,IDE),CONFIG(A,A,ATB,IDE),CONFIG(A,A,ATC,IDE),
+ CONFIG(A,A,ATD,IDE),CONFIG(B,A,ATE,IDE),CONFIG(B,C,GMB,IDE),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single ATA instance).
+static const NvU32 *g_Ap20MuxAta[] = {
+ &g_Ap20Mux_Ata[0],
+ NULL,
+};
+
+// Pin-mux option table for the NAND controller.  Configs 1/2 use the ATA pad
+// groups, config 3 the KBC pad groups.  Config numbers are interpreted by
+// NvRmPrivAp20GetModuleInterfaceCaps() to derive bus width / RBSY mode.
+static const NvU32 g_Ap20Mux_Nand[] = {
+
+ UNCONFIG(A,ATA,NAND,IDE), UNCONFIG(A,ATB,NAND,IDE), UNCONFIG(A,ATC,NAND,IDE),
+ UNCONFIG(A,ATD,NAND,IDE), UNCONFIG(A,ATE,NAND,IDE),
+ CONFIGEND(),
+ // config 1
+ // NOTE(review): GMB is routed to IDE (not NAND) inside this NAND config --
+ // confirm this is intentional (e.g. GMB shared with the IDE function).
+ CONFIG(A,A,ATA,NAND),CONFIG(A,A,ATB,NAND),CONFIG(A,A,ATC,NAND),
+ CONFIG(A,A,ATD,NAND),CONFIG(B,A,ATE,NAND),CONFIG(B,C,GMB,IDE), CONFIGEND(),
+ //.config 2
+ CONFIG(A,A,ATA,NAND),CONFIG(A,A,ATB,NAND),CONFIG(A,A,ATC,NAND),CONFIGEND(),
+ // config 3
+ CONFIG(A,C,KBCA,NAND),CONFIG(A,C,KBCB,NAND),CONFIG(B,C,KBCC,NAND),
+ CONFIG(D,G,KBCD,NAND),CONFIG(A,A,KBCE,NAND),CONFIG(A,A,KBCF,NAND), CONFIGEND(),
+ // config 4
+ CONFIG(A,A,ATC,NAND),CONFIGEND(),
+
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single NAND instance).
+static const NvU32 *g_Ap20MuxNand[] = {
+ &g_Ap20Mux_Nand[0],
+ NULL,
+};
+
+// Pin-mux option tables for the five DAP (digital audio port) instances.
+// DAP1-4 each map to their dedicated pad group; DAP5 is routed over GME.
+static const NvU32 g_Ap20Mux_Dap1[] = {
+ CONFIGEND(),
+ CONFIG(A,C,DAP1,DAP1),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Dap2[] = {
+ CONFIGEND(),
+ CONFIG(A,C,DAP2,DAP2),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Dap3[] = {
+ CONFIGEND(),
+ CONFIG(A,C,DAP3,DAP3),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Dap4[] = {
+ CONFIGEND(),
+ CONFIG(A,C,DAP4,DAP4),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_Dap5[] = {
+ CONFIGEND(),
+ CONFIG(B,D,GME,DAP5),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list for DAP1..DAP5.
+static const NvU32 *g_Ap20MuxDap[] = {
+ &g_Ap20Mux_Dap1[0],
+ &g_Ap20Mux_Dap2[0],
+ &g_Ap20Mux_Dap3[0],
+ &g_Ap20Mux_Dap4[0],
+ &g_Ap20Mux_Dap5[0],
+ NULL,
+};
+
+// Pin-mux option table for the keyboard controller.  Configs are nested via
+// BRANCH(): config 1 chains to 2, 2 to 3, 3 to 4, so each smaller config is a
+// subset of the larger ones (progressively fewer KBC pad groups claimed).
+// NOTE(review): BRANCH targets inferred from table layout -- confirm against
+// the pin-mux table interpreter.
+static const NvU32 g_Ap20Mux_Kbd[] = {
+ CONFIGEND(),
+ // config 1
+ BRANCH(2),CONFIG(A,C,KBCB,KBC),CONFIGEND(),
+ // config 2
+ BRANCH(3),CONFIG(D,G,KBCD,KBC),CONFIGEND(),
+ // config 3
+ BRANCH(4),CONFIG(A,A,KBCF,KBC),CONFIGEND(),
+ // config 4
+ CONFIG(A,C,KBCA,KBC),CONFIG(B,C,KBCC,KBC),CONFIG(A,A,KBCE,KBC),CONFIGEND(),
+ MODULEDONE(),
+ SUBROUTINESDONE(),
+};
+
+// NULL-terminated per-instance table list (single KBC instance).
+static const NvU32 *g_Ap20MuxKbd[] = {
+ &g_Ap20Mux_Kbd[0],
+ NULL,
+};
+
+// Pin-mux option table for HDCP (routed as an HDMI function) over the PTA,
+// LSCK/LSDA, LPW and LSC1 pad groups.
+static const NvU32 g_Ap20Mux_Hdcp[] = {
+ CONFIGEND(),
+ // config 1
+ CONFIG(A,G,PTA,HDMI),CONFIGEND(),
+ // config 2
+ CONFIG(C,E,LSCK,HDMI),CONFIG(D,E,LSDA,HDMI),CONFIGEND(),
+ // config 3
+ CONFIG(D,E,LPW2,HDMI),CONFIG(D,E,LPW0,HDMI),CONFIGEND(),
+ // config 4
+ CONFIG(C,E,LSC1,HDMI),CONFIG(D,E,LPW0,HDMI),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single HDCP instance).
+static const NvU32 *g_Ap20MuxHdcp[] = {
+ &g_Ap20Mux_Hdcp[0],
+ NULL,
+};
+
+// Pin-mux option table for sync NOR (GMI).  BRANCH() entries share common pad
+// assignments between configs: the deconfigure path branches through config 2,
+// configs 2 and 4 branch into config 3, and configs 3 and 5 branch into the
+// subroutine after MODULEDONE() (the GMI address/data pads).
+// NOTE(review): BRANCH targets inferred from table layout -- confirm against
+// the pin-mux table interpreter.
+static const NvU32 g_Ap20Mux_Snor[] = {
+ CONFIGEND(),
+ BRANCH(2),
+ // config 1. separate 32b NOR
+ CONFIG(A,C,IRRX,GMI),CONFIG(A,C,IRTX,GMI),CONFIG(B,B,UCA,GMI),
+ CONFIG(B,B,UCB,GMI),CONFIG(A,D,GPU,GMI),CONFIGEND(),
+ // config 2. muxed 32b NOR
+ BRANCH(3),
+ CONFIG(A,B,GMC,GMI),CONFIG(A,B,GMA,GMI), CONFIG(B,D,GME,GMI),
+ CONFIG(A,C,DAP1,GMI), CONFIGEND(),
+ // config 3. muxed 16b NOR
+ BRANCH(6),
+ CONFIG(B,C,GMB,GMI),CONFIG(A,A,ATB,GMI),CONFIGEND(),
+ // config 4. separate 16b NOR
+ BRANCH(3),
+ CONFIG(A,C,IRRX,GMI),CONFIG(A,C,IRTX,GMI),CONFIG(B,B,UCA,GMI),
+ CONFIG(B,B,UCB,GMI),CONFIG(A,D,GPU,GMI),CONFIGEND(),
+ // config 5. MuxOneNAND
+ BRANCH(6),
+ CONFIG(B,C,GMB,GMI_INT),CONFIG(A,B,GMC,SFLASH),CONFIGEND(),
+ MODULEDONE(),
+ // subroutine 1. shared by 16b muxed NOR & muxOneNand
+ CONFIG(A,C,DAP4,GMI),CONFIG(A,C,DAP2,GMI),CONFIG(B,D,SPIA,GMI),
+ CONFIG(B,D,SPIB,GMI),CONFIG(B,D,SPIC,GMI),CONFIG(B,D,SPID,GMI),
+ CONFIG(B,D,SPIE,GMI),
+ CONFIG(A,A,ATA,GMI),CONFIG(A,A,ATC,GMI),CONFIG(A,A,ATD,GMI),
+ CONFIG(B,A,ATE,GMI),CONFIG(B,C,GMD,GMI),CONFIGEND(),
+ SUBROUTINESDONE(),
+};
+
+// NULL-terminated per-instance table list (single sync-NOR instance).
+static const NvU32 *g_Ap20MuxSnor[] = {
+ &g_Ap20Mux_Snor[0],
+ NULL,
+};
+
+// Pin-mux option table for MIO: one configuration over KBCF/KBCD/KBCB.
+static const NvU32 g_Ap20Mux_Mio[] = {
+ CONFIGEND(),
+ CONFIG(A,A,KBCF,MIO),CONFIG(D,G,KBCD,MIO),CONFIG(A,C,KBCB,MIO),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single MIO instance).
+static const NvU32 *g_Ap20MuxMio[] = {
+ &g_Ap20Mux_Mio[0],
+ NULL,
+};
+
+// Pin-mux option tables for the external clock outputs.  Each config selects
+// a clock source on the CDEV1/CDEV2/CSUS pad; the chosen entry is decoded by
+// NvRmPrivAp20GetExternalClockSourceFreq() and
+// NvRmPrivAp20EnableExternalClockSource() below.
+static const NvU32 g_Ap20Mux_ExtClock1[] = {
+ CONFIGEND(),
+ CONFIG(A,C,CDEV1,PLLA_OUT),CONFIGEND(),
+ CONFIG(A,C,CDEV1,OSC),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_ExtClock2[] = {
+ CONFIGEND(),
+ CONFIG(A,C,CDEV2,AHB_CLK),CONFIGEND(),
+ CONFIG(A,C,CDEV2,OSC),CONFIGEND(),
+ CONFIG(A,C,CDEV2,PLLP_OUT4),CONFIGEND(),
+ MODULEDONE(),
+};
+
+static const NvU32 g_Ap20Mux_ExtClock3[] = {
+ CONFIGEND(),
+ CONFIG(A,C,CSUS,VI_SENSOR_CLK),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list for the three clock outputs.
+static const NvU32 *g_Ap20MuxExtClock[] = {
+ &g_Ap20Mux_ExtClock1[0],
+ &g_Ap20Mux_ExtClock2[0],
+ &g_Ap20Mux_ExtClock3[0],
+ NULL,
+};
+
+// Pin-mux option table for the VI (video input) controller.  Config 1
+// branches into config 2 (the DTA..DTE pads) and additionally claims DTF.
+static const NvU32 g_Ap20Mux_Vi[] = {
+ CONFIGEND(),
+ BRANCH(2),CONFIG(D,G,DTF,VI),CONFIGEND(),
+ CONFIG(A,B,DTA,VI),CONFIG(A,B,DTB,VI),CONFIG(A,B,DTC,VI),
+ CONFIG(A,B,DTD,VI),CONFIG(A,B,DTE,VI),CONFIGEND(),
+ MODULEDONE(),
+ SUBROUTINESDONE(),
+};
+
+// NULL-terminated per-instance table list (single VI instance).
+static const NvU32 *g_Ap20MuxVi[] = {
+ &g_Ap20Mux_Vi[0],
+ NULL,
+};
+
+// Pin-mux option tables for the backlight PWM outputs, one table per
+// (display, PWM) pair.  Unlike the other tables these are non-static;
+// presumably referenced from another translation unit -- confirm before
+// changing linkage.
+const NvU32 g_Ap20Mux_BacklightDisplay1Pwm0[] = {
+ CONFIGEND(),
+ // Config 1 LPW0 pad
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIGEND(),
+ // Config 2 LPW2 pad
+ CONFIG(D,E,LPW2,DISPLAYA), CONFIGEND(),
+ // Config 3 LM0 pad
+ CONFIG(C,E,LM0,DISPLAYA), CONFIGEND(),
+ MODULEDONE(),
+};
+
+const NvU32 g_Ap20Mux_BacklightDisplay1Pwm1[] = {
+ CONFIGEND(),
+ // Config 1 LM1 pad
+ CONFIG(C,E,LM1,DISPLAYA), CONFIGEND(),
+ // Config 2 LDC pad
+ CONFIG(C,E,LDC,DISPLAYA), CONFIGEND(),
+ // Config 3 LPW1 pad
+ CONFIG(D,E,LPW1,DISPLAYA), CONFIGEND(),
+ MODULEDONE(),
+};
+
+const NvU32 g_Ap20Mux_BacklightDisplay2Pwm0[] = {
+ CONFIGEND(),
+ // Config 1 LPW0 pad
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIGEND(),
+ // Config 2 LPW2 pad
+ CONFIG(D,E,LPW2,DISPLAYB), CONFIGEND(),
+ // Config 3 LM0 pad
+ CONFIG(C,E,LM0,DISPLAYB), CONFIGEND(),
+ MODULEDONE(),
+};
+
+const NvU32 g_Ap20Mux_BacklightDisplay2Pwm1[] = {
+ CONFIGEND(),
+ // Config 1 LM1 pad
+ CONFIG(C,E,LM1,DISPLAYB), CONFIGEND(),
+ // Config 2 LDC pad
+ CONFIG(C,E,LDC,DISPLAYB), CONFIGEND(),
+ // Config 3 LPW1 pad
+ CONFIG(D,E,LPW1,DISPLAYB), CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated table list: Display1/Pwm0, Display1/Pwm1, Display2/Pwm0,
+// Display2/Pwm1.
+const NvU32* g_Ap20MuxBacklight[] = {
+ &g_Ap20Mux_BacklightDisplay1Pwm0[0],
+ &g_Ap20Mux_BacklightDisplay1Pwm1[0],
+ &g_Ap20Mux_BacklightDisplay2Pwm0[0],
+ &g_Ap20Mux_BacklightDisplay2Pwm1[0],
+ NULL,
+};
+
+
+// Pin-mux option table for display controller A.  Configs 1/2/5/6 branch into
+// subroutine 1 (BRANCH(7): the 18-bit LD0..LD17 data bus plus pixel clock),
+// and config 1 branches into config 2.  Order of entries is significant.
+static const NvU32 g_Ap20Mux_Display1[] = {
+ CONFIGEND(),
+ // config 1, 24b RGB. Pure superset of Config2 (18b RGB)
+ BRANCH(2),
+ CONFIG(C,G,LHP1,DISPLAYA),CONFIG(C,G,LHP2,DISPLAYA),CONFIG(C,G,LVP1,DISPLAYA),
+ CONFIG(C,G,LHP0,DISPLAYA),CONFIG(D,G,LDI,DISPLAYA),CONFIG(D,G,LPP,DISPLAYA),
+ CONFIGEND(),
+ // config 2, 18b RGB.
+ BRANCH(7),
+ CONFIG(C,E,LVS,DISPLAYA), CONFIG(D,E,LHS,DISPLAYA), CONFIG(D,E,LSPI,DISPLAYA),
+ CONFIGEND(),
+ // config 3, 8 & 9b CPU.
+ CONFIG(C,G,LHP1,DISPLAYA), CONFIG(C,G,LHP2,DISPLAYA), CONFIG(C,G,LVP1,DISPLAYA),
+ CONFIG(C,G,LHP0,DISPLAYA), CONFIG(D,G,LDI,DISPLAYA), CONFIG(D,G,LPP,DISPLAYA),
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIG(D,E,LPW1,DISPLAYA), CONFIG(D,E,LPW2,DISPLAYA),
+ CONFIG(C,E,LSC1,DISPLAYA), CONFIG(C,E,LM1,DISPLAYA), CONFIG(C,E,LVP0,DISPLAYA),
+ CONFIGEND(),
+ // config 4. SPI
+ CONFIG(D,E,LPW0,DISPLAYA), CONFIG(D,E,LPW2,DISPLAYA), CONFIG(C,E,LSC1,DISPLAYA),
+ CONFIG(C,E,LM0,DISPLAYA), CONFIG(C,E,LVP0,DISPLAYA), CONFIGEND(),
+ // Config 5. Panel 86
+ BRANCH(7),CONFIG(C,E,LSC1,DISPLAYA),CONFIG(C,E,LM1,DISPLAYA),CONFIGEND(),
+ // config 6. 16/18b smart panels
+ BRANCH(7),CONFIG(C,E,LDC,DISPLAYA),CONFIG(D,E,LSPI,DISPLAYA),CONFIGEND(),
+ MODULEDONE(),
+ // subroutine 1. - 18b data + clock
+ CONFIG(C,F,LD0,DISPLAYA), CONFIG(C,F,LD1,DISPLAYA), CONFIG(C,F,LD2,DISPLAYA),
+ CONFIG(C,F,LD3,DISPLAYA), CONFIG(C,F,LD4,DISPLAYA), CONFIG(C,F,LD5,DISPLAYA),
+ CONFIG(C,F,LD6,DISPLAYA), CONFIG(C,F,LD7,DISPLAYA), CONFIG(C,F,LD8,DISPLAYA),
+ CONFIG(C,F,LD9,DISPLAYA), CONFIG(C,F,LD10,DISPLAYA), CONFIG(C,F,LD11,DISPLAYA),
+ CONFIG(C,F,LD12,DISPLAYA), CONFIG(C,F,LD13,DISPLAYA), CONFIG(C,F,LD14,DISPLAYA),
+ CONFIG(C,F,LD15,DISPLAYA), CONFIG(C,G,LD16,DISPLAYA), CONFIG(C,G,LD17,DISPLAYA),
+ CONFIG(C,E,LSC0,DISPLAYA), CONFIGEND(),
+ SUBROUTINESDONE(), // This is required, since BRANCH is used.
+/* For handy reference, here is the complete list of CONFIG macros for the display
+ pad groups, in case any more configurations are defined in the future.
+ CONFIG(C,F,LD0,DISPLAYA), CONFIG(C,F,LD1,DISPLAYA), CONFIG(C,F,LD2,DISPLAYA),
+ CONFIG(C,F,LD3,DISPLAYA), CONFIG(C,F,LD4,DISPLAYA), CONFIG(C,F,LD5,DISPLAYA),
+ CONFIG(C,F,LD6,DISPLAYA), CONFIG(C,F,LD7,DISPLAYA), CONFIG(C,F,LD8,DISPLAYA),
+ CONFIG(C,F,LD9,DISPLAYA), CONFIG(C,F,LD10,DISPLAYA), CONFIG(C,F,LD11,DISPLAYA),
+ CONFIG(C,F,LD12,DISPLAYA),
+ CONFIG(C,F,LD13,DISPLAYA), CONFIG(C,F,LD14,DISPLAYA), CONFIG(C,F,LD15,DISPLAYA),
+ CONFIG(C,G,LD16,DISPLAYA), CONFIG(C,G,LD17,DISPLAYA),CONFIG(C,E,LSC0,DISPLAYA),
+ CONFIG(C,E,LVS,DISPLAYA), CONFIG(D,E,LHS,DISPLAYA), CONFIG(D,E,LSPI,DISPLAYA),
+ CONFIG(C,G,LHP1,DISPLAYA), CONFIG(C,G,LHP2,DISPLAYA), CONFIG(C,G,LHP0,DISPLAYA),
+ CONFIG(C,G,LVP1,DISPLAYA), CONFIG(D,G,LDI,DISPLAYA), CONFIG(D,G,LPP,DISPLAYA),
+ CONFIG(C,E,LCSN,DISPLAYA), CONFIG(C,E,LM1,DISPLAYA),CONFIG(C,E,LM0,DISPLAYA),
+ CONFIG(D,E,LPW0,DISPLAYA),CONFIG(D,E,LPW2,DISPLAYA), CONFIG(D,E,LPW1,DISPLAYA),
+ CONFIG(C,E,LVP0,DISPLAYA), CONFIG(C,E,LDC,DISPLAYA), CONFIG(C,E,LSC1,DISPLAYA),
+ CONFIG(D,E,LSDI,DISPLAYA),
+ */
+};
+
+// Pin-mux option table for display controller B.  Mirrors g_Ap20Mux_Display1
+// exactly, with every pad routed to the DISPLAYB function instead of DISPLAYA.
+static const NvU32 g_Ap20Mux_Display2[] = {
+ CONFIGEND(),
+ // config 1, 24b RGB. Pure superset of Config2 (18b RGB)
+ BRANCH(2),
+ CONFIG(C,G,LHP1,DISPLAYB),CONFIG(C,G,LHP2,DISPLAYB),CONFIG(C,G,LVP1,DISPLAYB),
+ CONFIG(C,G,LHP0,DISPLAYB),CONFIG(D,G,LDI,DISPLAYB),CONFIG(D,G,LPP,DISPLAYB),
+ CONFIGEND(),
+ // config 2, 18b RGB.
+ BRANCH(7),
+ CONFIG(C,E,LVS,DISPLAYB), CONFIG(D,E,LHS,DISPLAYB), CONFIG(D,E,LSPI,DISPLAYB),
+ CONFIGEND(),
+ // config 3, 8 & 9b CPU.
+ CONFIG(C,G,LHP1,DISPLAYB), CONFIG(C,G,LHP2,DISPLAYB), CONFIG(C,G,LVP1,DISPLAYB),
+ CONFIG(C,G,LHP0,DISPLAYB), CONFIG(D,G,LDI,DISPLAYB), CONFIG(D,G,LPP,DISPLAYB),
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIG(D,E,LPW1,DISPLAYB), CONFIG(D,E,LPW2,DISPLAYB),
+ CONFIG(C,E,LSC1,DISPLAYB), CONFIG(C,E,LM1,DISPLAYB), CONFIG(C,E,LVP0,DISPLAYB),
+ CONFIGEND(),
+ // config 4. SPI
+ CONFIG(D,E,LPW0,DISPLAYB), CONFIG(D,E,LPW2,DISPLAYB), CONFIG(C,E,LSC1,DISPLAYB),
+ CONFIG(C,E,LM0,DISPLAYB), CONFIG(C,E,LVP0,DISPLAYB), CONFIGEND(),
+ // Config 5. Panel 86
+ BRANCH(7),CONFIG(C,E,LSC1,DISPLAYB),CONFIG(C,E,LM1,DISPLAYB),CONFIGEND(),
+ // config 6. 16/18b smart panels
+ BRANCH(7),CONFIG(C,E,LDC,DISPLAYB),CONFIG(D,E,LSPI,DISPLAYB),CONFIGEND(),
+ MODULEDONE(),
+ // subroutine 1. (config 7)
+ CONFIG(C,F,LD0,DISPLAYB), CONFIG(C,F,LD1,DISPLAYB), CONFIG(C,F,LD2,DISPLAYB),
+ CONFIG(C,F,LD3,DISPLAYB), CONFIG(C,F,LD4,DISPLAYB), CONFIG(C,F,LD5,DISPLAYB),
+ CONFIG(C,F,LD6,DISPLAYB), CONFIG(C,F,LD7,DISPLAYB), CONFIG(C,F,LD8,DISPLAYB),
+ CONFIG(C,F,LD9,DISPLAYB), CONFIG(C,F,LD10,DISPLAYB), CONFIG(C,F,LD11,DISPLAYB),
+ CONFIG(C,F,LD12,DISPLAYB), CONFIG(C,F,LD13,DISPLAYB), CONFIG(C,F,LD14,DISPLAYB),
+ CONFIG(C,F,LD15,DISPLAYB), CONFIG(C,G,LD16,DISPLAYB), CONFIG(C,G,LD17,DISPLAYB),
+ CONFIG(C,E,LSC0,DISPLAYB), CONFIGEND(),
+ SUBROUTINESDONE(),
+};
+
+// NULL-terminated per-instance table list for display controllers A and B.
+static const NvU32* g_Ap20MuxDisplay[] = {
+ &g_Ap20Mux_Display1[0],
+ &g_Ap20Mux_Display2[0],
+ NULL
+};
+
+// Pin-mux option table for the CRT DAC: one configuration on the CRTP pad.
+static const NvU32 g_Ap20Mux_Crt[] = {
+ CONFIGEND(),
+ CONFIG(D,G,CRTP,CRT),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single CRT instance).
+static const NvU32 *g_Ap20MuxCrt[] = {
+ &g_Ap20Mux_Crt[0],
+ NULL,
+};
+
+// Pin-mux option table for the ETM trace port.
+// NOTE(review): KBCB is routed to SDIO2 (not TRACE) inside this config --
+// confirm this is intentional (pads shared with trace routed elsewhere).
+static const NvU32 g_Ap20Mux_Etm[] = {
+ CONFIGEND(),
+ CONFIG(A,A,KBCF,TRACE),CONFIG(A,C,KBCB,SDIO2),CONFIG(B,C,KBCC,TRACE),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single ETM instance).
+static const NvU32 *g_Ap20MuxEtm[] = {
+ &g_Ap20Mux_Etm[0],
+ NULL,
+};
+
+// Pin-mux option table for the one-wire (OWR) controller.
+// NOTE(review): the UNCONFIG(D,GPU,PWM,RSVD4) entry and config 2's
+// CONFIG(A,D,GPU,PWM) route the GPU pad to PWM rather than OWR -- looks like a
+// shared-pad side effect; confirm against board configurations.
+static const NvU32 g_Ap20Mux_Owr[] = {
+ UNCONFIG(B,OWC,OWR,RSVD1),UNCONFIG(A,UAC,OWR,RSVD2),UNCONFIG(D,GPU,PWM,RSVD4),
+ CONFIGEND(),
+ // config 1
+ CONFIG(A,B,OWC,OWR),CONFIG(B,A,UAC,OWR),CONFIGEND(),
+ // config 2
+ CONFIG(A,B,OWC,OWR),CONFIG(A,D,GPU,PWM),CONFIGEND(),
+ // config 3
+ CONFIG(A,B,OWC,OWR),CONFIG(A,A,KBCE,OWR),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single OWR instance).
+static const NvU32 *g_Ap20MuxOwr[] = {
+ &g_Ap20Mux_Owr[0],
+ NULL,
+};
+
+// Pin-mux option table for PCIe.
+// NOTE(review): CONFIG(B,D,SDC,PWM) inside the PCIE config routes SDC to PWM,
+// not PCIE -- confirm this is intentional.
+static const NvU32 g_Ap20Mux_Pcie[] = {
+ CONFIGEND(),
+ CONFIG(A,D,GPV,PCIE),CONFIG(B,D,SDC,PWM),CONFIG(B,B,SLXK,PCIE),
+ CONFIG(B,B,SLXA,PCIE),CONFIGEND(),
+ MODULEDONE(),
+};
+
+// NULL-terminated per-instance table list (single PCIe instance).
+static const NvU32 *g_Ap20MuxPcie[] = {
+ &g_Ap20Mux_Pcie[0],
+ NULL,
+};
+
+// Master table of per-module pin-mux option lists.  The NV_CT_ASSERT below
+// requires exactly NvOdmIoModule_Num entries, so the row order must match the
+// NvOdmIoModule enum; NULL rows are modules with no mux options on AP20.
+static const NvU32** g_Ap20MuxControllers[] = {
+ &g_Ap20MuxAta[0],
+ &g_Ap20MuxCrt[0],
+ NULL, // no options for CSI
+ &g_Ap20MuxDap[0],
+ &g_Ap20MuxDisplay[0],
+ NULL, // no options for DSI
+ NULL, // no options for GPIO
+ &g_Ap20MuxHdcp[0],
+ &g_Ap20MuxHdmi[0],
+ &g_Ap20MuxHsi[0],
+ NULL, // No options for HSMMC
+ NULL, // no options for I2S
+ &g_Ap20MuxI2c[0],
+ &g_Ap20MuxI2cPmu[0],
+ &g_Ap20MuxKbd[0],
+ &g_Ap20MuxMio[0],
+ &g_Ap20MuxNand[0],
+ &g_Ap20MuxPwm[0],
+ &g_Ap20MuxSdio[0],
+ &g_Ap20MuxSflash[0],
+ NULL, // no options for Slink
+ &g_Ap20MuxSpdif[0],
+ &g_Ap20MuxSpi[0],
+ &g_Ap20MuxTwc[0],
+ NULL, // no options for TVO
+ &g_Ap20MuxUart[0],
+ NULL, // no options for USB
+ NULL, // no options for VDD
+ &g_Ap20MuxVi[0],
+ NULL, // no options for XIO
+ &g_Ap20MuxExtClock[0],
+ &g_Ap20MuxUlpi[0],
+ &g_Ap20MuxOwr[0],
+ &g_Ap20MuxSnor[0], // SyncNOR
+ &g_Ap20MuxPcie[0],
+ &g_Ap20MuxEtm[0],
+ NULL, // no options for TSENSor
+ &g_Ap20MuxBacklight[0],
+};
+
+// Compile-time check: one row per NvOdmIoModule value.
+NV_CT_ASSERT(NV_ARRAY_SIZE(g_Ap20MuxControllers)==NvOdmIoModule_Num);
+
+/**
+ * Returns the AP20 master pin-mux table (g_Ap20MuxControllers), indexed by
+ * NvOdmIoModule.  hDevice is only validated, not otherwise used.
+ */
+const NvU32***
+NvRmAp20GetPinMuxConfigs(NvRmDeviceHandle hDevice)
+{
+ NV_ASSERT(hDevice);
+ return (const NvU32***) g_Ap20MuxControllers;
+}
+
+/* Define the GPIO port/pin to tristate mappings */
+
+// Flat GPIO-to-pad-group table, 8 entries per port (ports A..BB), indexed by
+// Port*8 + Pin (see NvRmAp20GetPinGroupForGpio below).  Each GPIO_TRISTATE()
+// entry names the tristate register bank and pad group that controls the pin.
+const NvU16 g_Ap20GpioPadGroupMapping[] =
+{
+ // Port A
+ GPIO_TRISTATE(A,DTE), GPIO_TRISTATE(B,UCB), GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(A,DAP2),
+ GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(A,DAP2), GPIO_TRISTATE(B,SDD), GPIO_TRISTATE(B,SDD),
+ // Port B
+ GPIO_TRISTATE(A,GMC), GPIO_TRISTATE(A,GMC), GPIO_TRISTATE(D,LPW0), GPIO_TRISTATE(C,LSC0),
+ GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC), GPIO_TRISTATE(B,SDC),
+ // Port C
+ GPIO_TRISTATE(B,UCB), GPIO_TRISTATE(D,LPW1), GPIO_TRISTATE(B,UAD), GPIO_TRISTATE(B,UAD),
+ GPIO_TRISTATE(A,RM), GPIO_TRISTATE(A,RM), GPIO_TRISTATE(D,LPW2), GPIO_TRISTATE(B,GMB),
+ // Port D
+ GPIO_TRISTATE(B,SLXK), GPIO_TRISTATE(B,SLXA), GPIO_TRISTATE(A,DTE), GPIO_TRISTATE(B,SLXC),
+ GPIO_TRISTATE(B,SLXD), GPIO_TRISTATE(A,DTA), GPIO_TRISTATE(A,DTC), GPIO_TRISTATE(A,DTC),
+ // Port E
+ GPIO_TRISTATE(C,LD0), GPIO_TRISTATE(C,LD1), GPIO_TRISTATE(C,LD2), GPIO_TRISTATE(C,LD3),
+ GPIO_TRISTATE(C,LD4), GPIO_TRISTATE(C,LD5), GPIO_TRISTATE(C,LD6), GPIO_TRISTATE(C,LD7),
+ // Port F
+ GPIO_TRISTATE(C,LD8),GPIO_TRISTATE(C,LD9),GPIO_TRISTATE(C,LD10), GPIO_TRISTATE(C,LD11),
+ GPIO_TRISTATE(C,LD12),GPIO_TRISTATE(C,LD13),GPIO_TRISTATE(C,LD14), GPIO_TRISTATE(C,LD15),
+ // Port G
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ // Port H
+ GPIO_TRISTATE(A,ATD), GPIO_TRISTATE(A,ATD),GPIO_TRISTATE(A,ATD), GPIO_TRISTATE(A,ATD),
+ GPIO_TRISTATE(B,ATE), GPIO_TRISTATE(B,ATE),GPIO_TRISTATE(B,ATE), GPIO_TRISTATE(B,ATE),
+ // Port I
+ GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATB), GPIO_TRISTATE(A,ATA),
+ GPIO_TRISTATE(A,ATA),GPIO_TRISTATE(A,ATB), GPIO_TRISTATE(A,ATA), GPIO_TRISTATE(A,ATC),
+ // Port J
+ GPIO_TRISTATE(B,GMD),GPIO_TRISTATE(D,LSPI), GPIO_TRISTATE(B,GMD), GPIO_TRISTATE(D,LHS),
+ GPIO_TRISTATE(C,LVS),GPIO_TRISTATE(A,IRTX), GPIO_TRISTATE(A,IRRX), GPIO_TRISTATE(A,GMC),
+ // Port K
+ GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC), GPIO_TRISTATE(A,ATC),
+ GPIO_TRISTATE(A,ATC),GPIO_TRISTATE(B,SPDO), GPIO_TRISTATE(B,SPDI), GPIO_TRISTATE(A,GMC),
+ // Port L
+ GPIO_TRISTATE(A,DTD),GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD),
+ GPIO_TRISTATE(A,DTD),GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,DTD),
+ // Port M
+ GPIO_TRISTATE(C,LD16),GPIO_TRISTATE(C,LD17), GPIO_TRISTATE(C,LHP1), GPIO_TRISTATE(C,LHP2),
+ GPIO_TRISTATE(C,LVP1),GPIO_TRISTATE(C,LHP0), GPIO_TRISTATE(D,LDI), GPIO_TRISTATE(D,LPP),
+ // Port N
+ GPIO_TRISTATE(A,DAP1),GPIO_TRISTATE(A,DAP1), GPIO_TRISTATE(A,DAP1), GPIO_TRISTATE(A,DAP1),
+ GPIO_TRISTATE(C,LCSN),GPIO_TRISTATE(D,LSDA), GPIO_TRISTATE(C,LDC), GPIO_TRISTATE(C,HDINT),
+ // Port O
+ GPIO_TRISTATE(B,UAB),GPIO_TRISTATE(B,UAA), GPIO_TRISTATE(B,UAA), GPIO_TRISTATE(B,UAA),
+ GPIO_TRISTATE(B,UAA),GPIO_TRISTATE(B,UAB), GPIO_TRISTATE(B,UAB), GPIO_TRISTATE(B,UAB),
+ // Port P
+ GPIO_TRISTATE(A,DAP3),GPIO_TRISTATE(A,DAP3), GPIO_TRISTATE(A,DAP3), GPIO_TRISTATE(A,DAP3),
+ GPIO_TRISTATE(A,DAP4),GPIO_TRISTATE(A,DAP4), GPIO_TRISTATE(A,DAP4), GPIO_TRISTATE(A,DAP4),
+ // Port Q
+ GPIO_TRISTATE(B,KBCC),GPIO_TRISTATE(B,KBCC), GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCF),
+ GPIO_TRISTATE(A,KBCF),GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCF), GPIO_TRISTATE(A,KBCE),
+ // Port R
+ GPIO_TRISTATE(A,KBCA),GPIO_TRISTATE(A,KBCA), GPIO_TRISTATE(A,KBCA), GPIO_TRISTATE(D,KBCD),
+ GPIO_TRISTATE(D,KBCD),GPIO_TRISTATE(D,KBCD), GPIO_TRISTATE(D,KBCD), GPIO_TRISTATE(A,KBCB),
+ // Port S
+ GPIO_TRISTATE(A,KBCB),GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB),
+ GPIO_TRISTATE(A,KBCB),GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB), GPIO_TRISTATE(A,KBCB),
+ // Port T
+ GPIO_TRISTATE(A,DTD), GPIO_TRISTATE(A,CSUS), GPIO_TRISTATE(A,DTB), GPIO_TRISTATE(A,DTB),
+ GPIO_TRISTATE(A,DTA), GPIO_TRISTATE(A,PTA), GPIO_TRISTATE(A,PTA), GPIO_TRISTATE(A,ATB),
+ // Port U
+ GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU),
+ GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(A,GPU), GPIO_TRISTATE(D,GPU7),
+ // Port V
+ GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC), GPIO_TRISTATE(B,UAC),
+ GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(A,GPV), GPIO_TRISTATE(C,LVP0),
+ // Port W
+ GPIO_TRISTATE(C,LM0), GPIO_TRISTATE(C,LM1), GPIO_TRISTATE(B,SPIG), GPIO_TRISTATE(B,SPIH),
+ GPIO_TRISTATE(A,CDEV1), GPIO_TRISTATE(A,CDEV2),GPIO_TRISTATE(B,UCA),GPIO_TRISTATE(B,UCA),
+ // Port X
+ GPIO_TRISTATE(B,SPIA),GPIO_TRISTATE(B,SPIB),GPIO_TRISTATE(B,SPIC),GPIO_TRISTATE(B,SPIC),
+ GPIO_TRISTATE(B,SPID),GPIO_TRISTATE(B,SPIE),GPIO_TRISTATE(B,SPIE),GPIO_TRISTATE(B,SPIF),
+ // Port Y
+ GPIO_TRISTATE(D,UDA),GPIO_TRISTATE(D,UDA),GPIO_TRISTATE(D,UDA),GPIO_TRISTATE(D,UDA),
+ GPIO_TRISTATE(A,SDIO1),GPIO_TRISTATE(A,SDIO1),GPIO_TRISTATE(A,SDIO1),GPIO_TRISTATE(A,SDIO1),
+ // Port Z
+ GPIO_TRISTATE(A,SDIO1),GPIO_TRISTATE(A,SDIO1),GPIO_TRISTATE(D,LSDI),GPIO_TRISTATE(C,LSC1),
+ GPIO_TRISTATE(C,LSCK), GPIO_TRISTATE(A,PMC), GPIO_TRISTATE(A,I2CP), GPIO_TRISTATE(A,I2CP),
+ // Port AA
+ GPIO_TRISTATE(A,GMA), GPIO_TRISTATE(A,GMA), GPIO_TRISTATE(A,GMA), GPIO_TRISTATE(A,GMA),
+ GPIO_TRISTATE(B,GME), GPIO_TRISTATE(B,GME), GPIO_TRISTATE(B,GME), GPIO_TRISTATE(B,GME),
+ // Port BB (only 6 pins -- the table is deliberately short here, which the
+ // bounds check in NvRmAp20GetPinGroupForGpio relies on)
+ GPIO_TRISTATE(A,PMC), GPIO_TRISTATE(A,DTE), GPIO_TRISTATE(D,DTF), GPIO_TRISTATE(D,DTF),
+ GPIO_TRISTATE(A,DTE), GPIO_TRISTATE(A,DTE),
+};
+
+/**
+ * Looks up the pad group controlling GPIO (Port, Pin).
+ *
+ * @param hDevice   RM device handle (unused).
+ * @param Port      GPIO port number (0 == port A).
+ * @param Pin       Pin number within the port; must be < 8.
+ * @param pMapping  Receives the GPIO_TRISTATE() entry for the pin.
+ *
+ * @return NV_TRUE on success, NV_FALSE if Pin or the flattened index is out
+ *         of range (the table's last port has fewer than 8 entries).
+ */
+NvBool
+NvRmAp20GetPinGroupForGpio(NvRmDeviceHandle hDevice,
+ NvU32 Port,
+ NvU32 Pin,
+ NvU32 *pMapping)
+{
+ const NvU32 GpiosPerPort = 8;
+ NvU32 FlatIndex;
+
+ if (Pin >= GpiosPerPort)
+ return NV_FALSE;
+
+ FlatIndex = Port * GpiosPerPort + Pin;
+ if (FlatIndex >= NV_ARRAY_SIZE(g_Ap20GpioPadGroupMapping))
+ return NV_FALSE;
+
+ *pMapping = (NvU32)g_Ap20GpioPadGroupMapping[FlatIndex];
+ return NV_TRUE;
+}
+
+/**
+ * Returns the frequency (in kHz) of the external clock source selected by a
+ * pin-mux table entry, by decoding the MUX_CTL shift/set fields from
+ * *Instance and mapping the CDEV1/CDEV2/CSUS selection to the corresponding
+ * clock-source query.  Returns 0 for selections with no known frequency.
+ * The Config parameter is currently unused.
+ */
+NvU32
+NvRmPrivAp20GetExternalClockSourceFreq(
+ NvRmDeviceHandle hDevice,
+ const NvU32* Instance,
+ NvU32 Config)
+{
+ NvU32 MuxCtlShift, MuxCtlSet;
+ NvU32 ClockFreqInKHz = 0;
+
+ // Decode which MUX_CTL field this table entry programs, and to what value.
+ MuxCtlShift = NV_DRF_VAL(MUX,ENTRY,MUX_CTL_SHIFT,*Instance);
+ MuxCtlSet = NV_DRF_VAL(MUX,ENTRY,MUX_CTL_SET,*Instance);
+
+ switch (MuxCtlShift)
+ {
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_SHIFT:
+ if (MuxCtlSet==APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_PLLA_OUT)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllA0);
+ else if (MuxCtlSet==APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_OSC)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+ break;
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_SHIFT:
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_AHB_CLK)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_Ahb);
+ else if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_OSC)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
+ else if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_PLLP_OUT4)
+ ClockFreqInKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_PllP4);
+ break;
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_SHIFT:
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_VI_SENSOR_CLK)
+ {
+ // CSUS follows the VI sensor clock; ask the power manager for it.
+ if (NvRmPowerModuleClockConfig(hDevice, NvRmModuleID_Vi, 0, 0, 0,
+ NULL, 0, &ClockFreqInKHz, NvRmClockConfig_SubConfig) != NvSuccess)
+ {
+ ClockFreqInKHz = 0;
+ }
+ }
+ break;
+ default:
+ ClockFreqInKHz = 0;
+ }
+
+ return ClockFreqInKHz;
+}
+
+/**
+ * Enables or disables the external clock output selected by a pin-mux table
+ * entry.  Decodes the MUX_CTL shift/set fields from *Instance, attaches or
+ * detaches the backing PLL where one is selected (PLLA for CDEV1, PLLP_OUT4
+ * for CDEV2), then gates the output via the CLK_ENB_U SET/CLR registers.
+ * Unknown table entries are ignored.  The Config parameter is unused.
+ */
+void NvRmPrivAp20EnableExternalClockSource(
+ NvRmDeviceHandle hDevice,
+ const NvU32* Instance,
+ NvU32 Config,
+ NvBool ClockState)
+{
+ NvU32 MuxCtlShift = NV_DRF_VAL(MUX,ENTRY,MUX_CTL_SHIFT,*Instance);
+ NvU32 MuxCtlSet = NV_DRF_VAL(MUX,ENTRY,MUX_CTL_SET,*Instance);
+ NvU32 ClkEnbShift = ~0;
+
+ switch (MuxCtlShift)
+ {
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_SHIFT:
+ ClkEnbShift = CLK_RST_CONTROLLER_CLK_ENB_U_SET_0_SET_CLK_ENB_DEV1_OUT_SHIFT;
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV1_SEL_PLLA_OUT)
+ {
+ NvRmPrivExternalClockAttach(
+ hDevice, NvRmClockSource_PllA0, ClockState);
+ }
+ break;
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_SHIFT:
+ ClkEnbShift = CLK_RST_CONTROLLER_CLK_ENB_U_SET_0_SET_CLK_ENB_DEV2_OUT_SHIFT;
+ if (MuxCtlSet == APB_MISC_PP_PIN_MUX_CTL_C_0_CDEV2_SEL_PLLP_OUT4)
+ {
+ NvRmPrivExternalClockAttach(
+ hDevice, NvRmClockSource_PllP4, ClockState);
+ }
+ break;
+ case APB_MISC_PP_PIN_MUX_CTL_C_0_CSUS_SEL_SHIFT:
+ ClkEnbShift = CLK_RST_CONTROLLER_CLK_ENB_U_SET_0_SET_CLK_ENB_SUS_OUT_SHIFT;
+ break;
+ default:
+ // Not an external-clock mux entry; nothing to enable/disable.
+ return;
+ }
+
+ // SET and CLR are separate write-1-to-act registers, so no read-modify-write
+ // is needed.
+ if (ClockState)
+ {
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_U_SET_0, (1UL<<ClkEnbShift));
+ }
+ else
+ {
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_CLK_ENB_U_CLR_0, (1UL<<ClkEnbShift));
+ }
+}
+
+/**
+ * Translates an RM module ID (module + instance) into the corresponding ODM
+ * I/O module and instance, for the modules with a non-trivial mapping.
+ *
+ * Note USB2 OTG instance 1 maps to ULPI instance 0, and OTG instance 2 maps
+ * to USB instance 1.
+ *
+ * @param pCnt  Receives the number of ODM modules written (1 on success,
+ *              0 if no mapping exists).
+ *
+ * @return NV_TRUE if a mapping was found, NV_FALSE otherwise.
+ */
+NvBool NvRmPrivAp20RmModuleToOdmModule(
+ NvRmModuleID RmModule,
+ NvOdmIoModule *OdmModule,
+ NvU32 *OdmInstance,
+ NvU32 *pCnt)
+{
+ NvRmModuleID Module = NVRM_MODULE_ID_MODULE(RmModule);
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE(RmModule);
+ NvBool Success = NV_TRUE;
+ *pCnt = 1;
+ switch (Module)
+ {
+ case NvRmModuleID_Usb2Otg:
+ switch (Instance)
+ {
+ case 0:
+ *OdmModule = NvOdmIoModule_Usb;
+ *OdmInstance = 0;
+ break;
+ case 1:
+ // The second OTG controller is exposed to ODMs as ULPI.
+ *OdmModule = NvOdmIoModule_Ulpi;
+ *OdmInstance = 0;
+ break;
+ case 2:
+ *OdmModule = NvOdmIoModule_Usb;
+ *OdmInstance = 1;
+ break;
+ default:
+ NV_ASSERT(!"Invalid USB instance");
+ break;
+ }
+ break;
+ case NvRmModuleID_OneWire:
+ *OdmModule = NvOdmIoModule_OneWire;
+ *OdmInstance = Instance;
+ break;
+ case NvRmModuleID_SyncNor:
+ *OdmModule = NvOdmIoModule_SyncNor;
+ *OdmInstance = Instance;
+ break;
+ case NvRmPrivModuleID_Pcie:
+ *OdmModule = NvOdmIoModule_PciExpress;
+ *OdmInstance = Instance;
+ break;
+ default:
+ // Everything else maps 1:1 elsewhere; report "no special mapping".
+ Success = NV_FALSE;
+ *pCnt = 0;
+ break;
+ }
+ return Success;
+}
+
+/**
+ * Fills in interface capabilities for a module instance based on the ODM
+ * pin-map configuration in use.
+ *
+ * @param Module    ODM I/O module (Sdio, Pwm, Nand and Uart are supported).
+ * @param Instance  0-based controller instance.
+ * @param PinMap    The NvOdm*PinMap_ConfigN value selected for the instance.
+ * @param pCaps     Points to the module-specific caps structure
+ *                  (NvRmModuleSdmmcInterfaceCaps, NvRmModulePwmInterfaceCaps,
+ *                  NvRmModuleNandInterfaceCaps or NvRmModuleUartInterfaceCaps)
+ *                  to be filled in.
+ *
+ * @return NvSuccess on success, NvError_NotSupported for unknown modules or
+ *         unsupported (module, instance, pin-map) combinations.
+ */
+NvError
+NvRmPrivAp20GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 PinMap,
+ void *pCaps)
+{
+ switch (Module)
+ {
+ case NvOdmIoModule_Sdio:
+ // Instance is 0-based: Instance 1 is the SDIO2 controller, etc.
+ if (Instance == 1)
+ {
+ if (PinMap == NvOdmSdioPinMap_Config2 || PinMap == NvOdmSdioPinMap_Config4)
+ ((NvRmModuleSdmmcInterfaceCaps *)pCaps)->MmcInterfaceWidth = 8;
+ else if (PinMap == NvOdmSdioPinMap_Config1 ||
+ PinMap == NvOdmSdioPinMap_Config3 || PinMap == NvOdmSdioPinMap_Config5)
+ ((NvRmModuleSdmmcInterfaceCaps *)pCaps)->MmcInterfaceWidth = 4;
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ return NvError_NotSupported;
+ }
+ }
+ else if (Instance==2 && PinMap==NvOdmSdioPinMap_Config1)
+ ((NvRmModuleSdmmcInterfaceCaps *)pCaps)->MmcInterfaceWidth = 8;
+ else if (Instance==3 && (PinMap==NvOdmSdioPinMap_Config1 || PinMap==NvOdmSdioPinMap_Config2))
+ ((NvRmModuleSdmmcInterfaceCaps *)pCaps)->MmcInterfaceWidth = 8;
+ else
+ ((NvRmModuleSdmmcInterfaceCaps *)pCaps)->MmcInterfaceWidth = 4;
+ // Use NvSuccess consistently (was NvError_Success; same value, but the
+ // rest of this file returns NvSuccess).
+ return NvSuccess;
+
+ case NvOdmIoModule_Pwm:
+ // PwmOutputIdSupported appears to be a bitmask of usable PWM outputs
+ // for each pin-map (see g_Ap20Mux_Pwm configs) -- confirm against the
+ // PWM driver before relying on individual bit meanings.
+ if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config1))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 15;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config2))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 13;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config3))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 1;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config4))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 12;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config5))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 15;
+ else if (Instance == 0 && (PinMap == NvOdmPwmPinMap_Config6))
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 3;
+ else
+ {
+ ((NvRmModulePwmInterfaceCaps *)pCaps)->PwmOutputIdSupported = 0;
+ return NvError_NotSupported;
+ }
+ return NvSuccess;
+ case NvOdmIoModule_Nand:
+ // Configs 1/3 are 16-bit interfaces, configs 2/4 are 8-bit; all use
+ // combined ready/busy mode.
+ if (Instance == 0 && (PinMap == NvOdmNandPinMap_Config1 || PinMap ==
+ NvOdmNandPinMap_Config3))
+ {
+ ((NvRmModuleNandInterfaceCaps*)pCaps)->IsCombRbsyMode = NV_TRUE;
+ ((NvRmModuleNandInterfaceCaps*)pCaps)->NandInterfaceWidth = 16;
+ }
+ else if (Instance == 0 && (PinMap == NvOdmNandPinMap_Config2 ||
+ PinMap == NvOdmNandPinMap_Config4))
+ {
+ ((NvRmModuleNandInterfaceCaps*)pCaps)->IsCombRbsyMode = NV_TRUE;
+ ((NvRmModuleNandInterfaceCaps*)pCaps)->NandInterfaceWidth = 8;
+ }
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ return NvError_NotSupported;
+ }
+ return NvSuccess;
+ case NvOdmIoModule_Uart:
+ // NumberOfInterfaceLines: how many UART signals the pin-map routes
+ // (0 means the pin-map provides no usable lines).
+ if (Instance == 0)
+ {
+ if (PinMap == NvOdmUartPinMap_Config1)
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 8;
+ else if (PinMap == NvOdmUartPinMap_Config2)
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 7;
+ else if ((PinMap == NvOdmUartPinMap_Config3) || (PinMap == NvOdmUartPinMap_Config6))
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 4;
+ else if ((PinMap == NvOdmUartPinMap_Config4) || (PinMap == NvOdmUartPinMap_Config5))
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 2;
+ else if (PinMap == NvOdmUartPinMap_Config7)
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 6;
+ else
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 0;
+ }
+ else if ((Instance == 1) || (Instance == 2))
+ {
+ if (PinMap == NvOdmUartPinMap_Config1)
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 4;
+ else if (PinMap == NvOdmUartPinMap_Config2)
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 2;
+ else
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 0;
+ }
+ else if ((Instance == 3) || (Instance == 4))
+ {
+ if ((PinMap == NvOdmUartPinMap_Config1) || (PinMap == NvOdmUartPinMap_Config2))
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 4;
+ else
+ ((NvRmModuleUartInterfaceCaps *)pCaps)->NumberOfInterfaceLines = 0;
+ }
+ else
+ {
+ NV_ASSERT(NV_FALSE);
+ return NvError_NotSupported;
+ }
+ return NvSuccess;
+
+ default:
+ break;
+ }
+
+ return NvError_NotSupported;
+}
+
+/**
+ * Reads a boot-strap value from the APB_MISC strapping-options register.
+ * Only the RAM_CODE strap group is currently supported.
+ *
+ * @return NvSuccess with *pStrapValue filled in, or NvError_NotSupported for
+ *         any other strap group.
+ */
+NvError
+NvRmAp20GetStraps(
+ NvRmDeviceHandle hDevice,
+ NvRmStrapGroup StrapGroup,
+ NvU32* pStrapValue)
+{
+ NvU32 reg = NV_REGR(
+ hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_STRAPPING_OPT_A_0);
+
+ switch (StrapGroup)
+ {
+ case NvRmStrapGroup_RamCode:
+ // Extract just the RAM_CODE field from the strap register.
+ reg = NV_DRF_VAL(APB_MISC_PP, STRAPPING_OPT_A, RAM_CODE, reg);
+ break;
+ default:
+ return NvError_NotSupported;
+ }
+ *pStrapValue = reg;
+ return NvSuccess;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.c
new file mode 100644
index 000000000000..b1c0b38251c4
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "ap20rm_power_dfs.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_pmu.h"
+#include "ap20/aremc.h"
+#include "ap20/arclk_rst.h"
+#include "ap20/arapb_misc.h"
+
+/*****************************************************************************/
+
+// Register access macros for EMC module
+#define NV_EMC_REGR(pEmcRegs, reg) \
+ NV_READ32((((NvU32)(pEmcRegs)) + EMC_##reg##_0))
+#define NV_EMC_REGW(pEmcRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pEmcRegs)) + EMC_##reg##_0), (val))
+
+// Register access macros for APB MISC module
+#define NV_APB_REGR(pApbRegs, reg) \
+ NV_READ32((((NvU32)(pApbRegs)) + APB_MISC_##reg##_0))
+#define NV_APB_REGW(pApbRegs, reg, val) \
+ NV_WRITE32((((NvU32)(pApbRegs)) + APB_MISC_##reg##_0), (val))
+
+// TODO: Always Disable before check-in
+#define NVRM_TEST_PMREQUEST_UP_MODE (0)
+
+/*****************************************************************************/
+// EMC MODULE INTERFACES
+/*****************************************************************************/
+
+NvError NvRmPrivAp20EmcMonitorsInit(NvRmDfs* pDfs)
+{
+ NvU32 RegValue;
+ void* pEmcRegs = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+ NV_ASSERT(pEmcRegs);
+
+ /*
+ * EMC power management monitor belongs to EMC module - just reset it,
+ * and do not touch anything else in EMC.
+ */
+ RegValue = NV_EMC_REGR(pEmcRegs, STAT_CONTROL);
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, RST, RegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+
+ /*
+ * EMC active clock cycles = EMC monitor reading * 2^M, where M depends
+ * on DRAM type and bus width. Power M is stored as EMC readouts scale
+ */
+ #define COUNT_SHIFT_DDR1_X32 (1)
+ RegValue = NV_EMC_REGR(pEmcRegs, FBIO_CFG5);
+ switch (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_TYPE, RegValue))
+ {
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR1:
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_LPDDR2:
+ case EMC_FBIO_CFG5_0_DRAM_TYPE_DDR2:
+ pDfs->Modules[NvRmDfsModuleId_Emc].Scale = COUNT_SHIFT_DDR1_X32;
+ break;
+ default:
+ NV_ASSERT(!"Not supported DRAM type");
+ }
+ if (NV_DRF_VAL(EMC, FBIO_CFG5, DRAM_WIDTH, RegValue) ==
+ EMC_FBIO_CFG5_0_DRAM_WIDTH_X16)
+ {
+ pDfs->Modules[NvRmDfsModuleId_Emc].Scale++;
+ }
+ return NvSuccess;
+}
+
+void NvRmPrivAp20EmcMonitorsDeinit(NvRmDfs* pDfs)
+{
+ // Stop monitor using initialization procedure
+ (void)NvRmPrivAp20EmcMonitorsInit(pDfs);
+}
+
+void
+NvRmPrivAp20EmcMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs)
+{
+ NvU32 RegValue, SavedRegValue;
+ void* pEmcRegs = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+
+ // EMC sample period is specified in EMC clock cycles, accuracy 0-16 cycles.
+ #define MEAN_EMC_LIMIT_ERROR (8)
+ NvU32 cycles = IntervalMs * pDfsKHz->Domains[NvRmDfsClockId_Emc] +
+ MEAN_EMC_LIMIT_ERROR;
+ /*
+ * Start EMC power monitor for the next sample period: clear EMC counters,
+ * set sample interval limit in EMC cycles, enable monitoring. Monitor is
+ * counting EMC 1x clock cycles while any memory access is detected.
+ */
+ SavedRegValue = NV_EMC_REGR(pEmcRegs, STAT_CONTROL);
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, CLEAR, SavedRegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+
+ RegValue = NV_DRF_NUM(EMC, STAT_PWR_CLOCK_LIMIT, PWR_CLOCK_LIMIT, cycles);
+ NV_EMC_REGW(pEmcRegs, STAT_PWR_CLOCK_LIMIT, RegValue);
+
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, ENABLE, SavedRegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+}
+
+void
+NvRmPrivAp20EmcMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData)
+{
+ NvU32 RegValue, TotalClocks;
+ NvU32 CountShift = pDfs->Modules[NvRmDfsModuleId_Emc].Scale;
+ void* pEmcRegs = pDfs->Modules[NvRmDfsModuleId_Emc].pBaseReg;
+
+ /*
+ * Read EMC monitor: disable it (=stop, the readings are preserved), and
+ * determine idle count based on total and active clock counts. Monitor
+ * readings are multiplied by 2^M factor to determine active count, where
+ * power M depends on DRAM type and bus width. Store result in the idle
+ * data packet.
+ */
+ RegValue = NV_EMC_REGR(pEmcRegs, STAT_CONTROL);
+ RegValue = NV_FLD_SET_DRF_DEF(EMC, STAT_CONTROL, PWR_GATHER, DISABLE, RegValue);
+ NV_EMC_REGW(pEmcRegs, STAT_CONTROL, RegValue);
+
+ RegValue = NV_EMC_REGR(pEmcRegs, STAT_PWR_CLOCKS);
+ TotalClocks = NV_DRF_VAL(EMC, STAT_PWR_CLOCKS, PWR_CLOCKS, RegValue);
+ RegValue = NV_EMC_REGR(pEmcRegs, STAT_PWR_COUNT);
+ RegValue = NV_DRF_VAL(EMC, STAT_PWR_COUNT, PWR_COUNT, RegValue) << CountShift;
+
+ pIdleData->Readings[NvRmDfsClockId_Emc] =
+ (TotalClocks > RegValue) ? (TotalClocks - RegValue) : 0;
+}
+
+/*****************************************************************************/
+
+// AP20 Thermal policy definitions
+
+#define NVRM_THERMAL_DEGREES_HIGH (85L)
+#define NVRM_THERMAL_DEGREES_LOW (50L)
+#define NVRM_THERMAL_DEGREES_HYSTERESIS (5L)
+
+#define NVRM_THERMAL_POLL_MS_SLOW (200UL)
+#define NVRM_THERMAL_POLL_MS_FAST (100UL)
+#define NVRM_THERMAL_POLL_MS_CRITICAL (50UL)
+#define NVRM_THERMAL_POLL_INTR_FACTOR (10UL)
+
+#define NVRM_THERMAL_CPU_KHZ_LOW (500000UL)
+
+#define NVRM_THERMAL_CPU_DELTA_KHZ_LOW (200000L)
+#define NVRM_THERMAL_CPU_DELTA_KHZ_HIGH (100000L)
+#define NVRM_THERMAL_CPU_DELTA_KHZ_CRITICAL (-100000L)
+
+void
+NvRmPrivAp20DttGetTcorePolicy(
+ NvS32 TemperatureC,
+ const NvRmDtt* pDtt,
+ NvS32* pLowLimit,
+ NvS32* pHighLimit,
+ NvU32* pPollMs)
+{
+ NvU32 msec;
+ NvS32 LowLimit, HighLimit;
+
+ NV_ASSERT(pDtt->TcoreCaps.Tmin <
+ (NVRM_THERMAL_DEGREES_LOW - NVRM_THERMAL_DEGREES_HYSTERESIS));
+ NV_ASSERT(pDtt->TcoreCaps.Tmax > NVRM_THERMAL_DEGREES_HIGH);
+
+ /*
+ * Temperature limits policy: limits are always set "around" current
+ * temperature for the next out-of-limit interrupt; range boundaries
+ * are used for low and critical temperature.
+ */
+ if (TemperatureC <= NVRM_THERMAL_DEGREES_LOW)
+ {
+ LowLimit = pDtt->TcoreLowLimitCaps.MinValue;
+ HighLimit = NVRM_THERMAL_DEGREES_LOW;
+ }
+ else if (TemperatureC <= NVRM_THERMAL_DEGREES_HIGH)
+ {
+ LowLimit = NVRM_THERMAL_DEGREES_LOW - NVRM_THERMAL_DEGREES_HYSTERESIS;
+ HighLimit = NVRM_THERMAL_DEGREES_HIGH;
+
+ }
+ else
+ {
+ LowLimit = NVRM_THERMAL_DEGREES_HIGH - NVRM_THERMAL_DEGREES_HYSTERESIS;
+ HighLimit = pDtt->TcoreHighLimitCaps.MaxValue;
+ }
+
+ /*
+ * Polling time policy:
+ * - low/high temperature in polling mode: return policy value
+ * - low/high temperature in interrupt mode: policy value increased by intr
+ * factor (do not need polling at all in this mode, but just in case ...)
+ * - critical temperature and any mode: return policy value (we do need
+ * polling even in interrupt mode for active throttling)
+ * Keep higher polling rate inside hysteresis range.
+ */
+ if (TemperatureC <=
+ (NVRM_THERMAL_DEGREES_LOW - NVRM_THERMAL_DEGREES_HYSTERESIS))
+ {
+ if (pDtt->UseIntr)
+ msec = NVRM_THERMAL_POLL_MS_SLOW * NVRM_THERMAL_POLL_INTR_FACTOR;
+ else
+ msec = NVRM_THERMAL_POLL_MS_SLOW;
+ }
+ else if (TemperatureC <=
+ (NVRM_THERMAL_DEGREES_HIGH - NVRM_THERMAL_DEGREES_HYSTERESIS))
+ {
+ if (pDtt->UseIntr)
+ msec = NVRM_THERMAL_POLL_MS_FAST * NVRM_THERMAL_POLL_INTR_FACTOR;
+ else
+ msec = NVRM_THERMAL_POLL_MS_FAST;
+
+ }
+ else
+ {
+ msec = NVRM_THERMAL_POLL_MS_CRITICAL;
+ }
+
+ // Fill in return values
+ *pLowLimit = LowLimit;
+ *pHighLimit = HighLimit;
+ *pPollMs = msec;
+}
+
+NvBool
+NvRmPrivAp20DttClockUpdate(
+ NvRmDeviceHandle hRmDevice,
+ NvS32 TemperatureC,
+ const NvRmDfsFrequencies* pCurrentKHz,
+ NvRmDfsFrequencies* pDfsKHz)
+{
+ // Only CPU throttling for now
+ NvRmFreqKHz DeltaKHz;
+ NvRmFreqKHz CpuTargetKHz = pDfsKHz->Domains[NvRmDfsClockId_Cpu];
+ NvRmFreqKHz CpuThrottledKHz = pCurrentKHz->Domains[NvRmDfsClockId_Cpu];
+ NvBool Throttled = NV_FALSE;
+
+ // If CPU target is already low, no throttling
+ if (CpuTargetKHz <= NVRM_THERMAL_CPU_KHZ_LOW)
+ return Throttled;
+
+ // Determine max frequency delta based on temperature
+ if (TemperatureC <= NVRM_THERMAL_DEGREES_LOW)
+ DeltaKHz = NVRM_THERMAL_CPU_DELTA_KHZ_LOW;
+ else if (TemperatureC <= NVRM_THERMAL_DEGREES_HIGH)
+ DeltaKHz = NVRM_THERMAL_CPU_DELTA_KHZ_HIGH;
+ else
+ DeltaKHz = NVRM_THERMAL_CPU_DELTA_KHZ_CRITICAL;
+
+ // Find throttled limit
+ CpuThrottledKHz += DeltaKHz;
+ if (((NvS32)CpuThrottledKHz) < 0)
+ CpuThrottledKHz = 0;
+
+ // Find and set new target
+ CpuTargetKHz = NV_MIN(CpuTargetKHz, CpuThrottledKHz);
+ CpuTargetKHz = NV_MAX(CpuTargetKHz, NVRM_THERMAL_CPU_KHZ_LOW);
+ if (CpuTargetKHz < pDfsKHz->Domains[NvRmDfsClockId_Cpu])
+ {
+ Throttled = NV_TRUE;
+ }
+ pDfsKHz->Domains[NvRmDfsClockId_Cpu] = CpuTargetKHz;
+ return Throttled;
+}
+
+/*****************************************************************************/
+
+NvRmPmRequest
+NvRmPrivAp20GetPmRequest(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsSampler* pCpuSampler,
+ NvRmFreqKHz CpuKHz)
+{
+ // Assume initial slave CPU1 On request
+ static NvRmPmRequest s_LastPmRequest = (NvRmPmRequest_CpuOnFlag | 0x1);
+ static NvRmFreqKHz s_Cpu1OnMinKHz = 0, s_Cpu1OffMaxKHz = 0;
+
+ NvRmPmRequest PmRequest = NvRmPmRequest_None;
+ NvBool Cpu1Off =
+ (0 != NV_DRF_VAL(CLK_RST_CONTROLLER, RST_CPU_CMPLX_SET, SET_CPURESET1,
+ NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0)));
+ NvRmFreqKHz CpuLoadGaugeKHz = pCpuSampler->BumpedAverageKHz;
+
+ // Slave CPU1 power management policy thresholds:
+ // - use fixed values if they are defined explicitly, otherwise
+ // - use max CPU frequency (at min CPU voltage) as CPU1 OffMax threshold,
+ // and half of that frequency as CPU1 OnMin threshold
+ if ((s_Cpu1OffMaxKHz == 0) && (s_Cpu1OnMinKHz == 0))
+ {
+ NvU32 n;
+ const NvRmFreqKHz* p = NvRmPrivModuleVscaleGetMaxKHzList(
+ hRmDevice, NvRmModuleID_Cpu, &n);
+
+ NV_ASSERT (p && n);
+ s_Cpu1OnMinKHz = NVRM_CPU1_ON_MIN_KHZ ?
+ NVRM_CPU1_ON_MIN_KHZ : (p[0] >> 1);
+ s_Cpu1OffMaxKHz = NVRM_CPU1_OFF_MAX_KHZ ?
+ NVRM_CPU1_OFF_MAX_KHZ : p[0];
+ NV_ASSERT(s_Cpu1OnMinKHz < s_Cpu1OffMaxKHz);
+ }
+
+ /*
+ * Request OS kernel to turn CPU1 Off if all of the following is true:
+ * (a) CPU frequency is below OnMin threshold,
+ * (b) Last request was CPU1 On request
+ * (c) CPU1 is actually On
+ *
+ * Request OS kernel to turn CPU1 On if all of the following is true:
+ * (a) CPU frequency is above OffMax threshold
+ * (b) Last request was CPU1 Off request
+ * (c) CPU1 is actually Off
+ */
+ if (CpuLoadGaugeKHz < s_Cpu1OnMinKHz)
+ {
+ if ((s_LastPmRequest & NvRmPmRequest_CpuOnFlag) && (!Cpu1Off))
+ s_LastPmRequest = PmRequest = (NvRmPmRequest_CpuOffFlag | 0x1);
+#if NVRM_TEST_PMREQUEST_UP_MODE
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0,
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET_0_SET_CPURESET1_FIELD);
+#endif
+ }
+ else if (CpuLoadGaugeKHz > s_Cpu1OffMaxKHz)
+ {
+ if ((s_LastPmRequest & NvRmPmRequest_CpuOffFlag) && Cpu1Off)
+ s_LastPmRequest = PmRequest = (NvRmPmRequest_CpuOnFlag | 0x1);
+#if NVRM_TEST_PMREQUEST_UP_MODE
+ NV_REGW(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0,
+ CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0_CLR_CPURESET1_FIELD);
+#endif
+ }
+ return PmRequest;
+}
+
+/*****************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.h b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.h
new file mode 100644
index 000000000000..685d04568d27
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_power_dfs.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: NvRM DFS parameters.
+ *
+ */
+
+#ifndef INCLUDED_AP20RM_POWER_DFS_H
+#define INCLUDED_AP20RM_POWER_DFS_H
+
+#include "nvrm_power_dfs.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Min KHz for CPU and AVP with regards to JTAG support - 1MHz * 8 = 8MHz
+// TODO: any other limitations on min KHz?
+// TODO: adjust boost parameters based on testing
+
+/**
+ * Default DFS algorithm parameters for CPU domain
+ */
+#define NVRM_DFS_PARAM_CPU_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 40000, /* Minimum domain frequency 40 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 32000, /* Fixed frequency boost increase 32 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 4000, /* Fixed frequency boost increase 4 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 3, /* Relative adjustment of average frequency 1/2^3 ~ 12% */ \
+ 1, /* Number of sample intervals with NRT to trigger boost = 2 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for AVP domain
+ */
+#define NVRM_DFS_PARAM_AVP_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 3, /* Relative adjustment of average frequency 1/2^3 ~ 12% */ \
+ 2, /* Number of sample intervals with NRT to trigger boost = 3 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for System clock domain
+ */
+#define NVRM_DFS_PARAM_SYSTEM_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 5, /* Relative adjustment of average frequency 1/2^5 ~ 3% */ \
+ 2, /* Number of sample intervals with NRT to trigger boost = 3 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for AHB clock domain
+ */
+#define NVRM_DFS_PARAM_AHB_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for APB clock domain
+ */
+#define NVRM_DFS_PARAM_APB_AP20 \
+ NVRM_AP20_APB_MAX_KHZ, /* AP20 APB limit is lower than other buses */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 8000, /* Fixed frequency boost increase 8 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 32, /* Proportional frequency boost decrease 32/256 ~ 12% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for Video-pipe clock domain
+ */
+#define NVRM_DFS_PARAM_VPIPE_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 24000, /* Minimum domain frequency 24 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 16000, /* Fixed frequency RT boost increase 16 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 5, /* Relative adjustment of average frequency 1/2^5 ~ 3% */ \
+ 3, /* Number of sample intervals with NRT to trigger boost = 4 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+/**
+ * Default DFS algorithm parameters for EMC clock domain
+ */
+#define NVRM_DFS_PARAM_EMC_AP20 \
+ NvRmFreqMaximum, /* Maximum domain frequency set to h/w limit */ \
+ 16000, /* Minimum domain frequency 16 MHz */ \
+ 1000, /* Frequency change upper band 1 MHz */ \
+ 1000, /* Frequency change lower band 1 MHz */ \
+ { /* RT starvation control parameters */ \
+ 16000, /* Fixed frequency RT boost increase 16 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ { /* NRT starvation control parameters */ \
+ 1000, /* Fixed frequency NRT boost increase 1 MHz */ \
+ 255, /* Proportional frequency boost increase 255/256 ~ 100% */ \
+ 128, /* Proportional frequency boost decrease 128/256 ~ 50% */ \
+ },\
+ 0, /* Relative adjustment of average frequency 1/2^0 ~ 100% */ \
+ 0, /* Number of sample intervals with NRT to trigger boost = 1 */ \
+ 1 /* NRT idle cycles threshold = 1 */
+
+
+/**
+ * Defines CPU frequency threshold for slave CPU1 power management:
+ * - CPU1 is turned Off when cpu clock is below ON_MIN
+ * - CPU1 is turned On when cpu clock is above OFF_MAX
+ * If set to 0, the threshold value is derived at run time from the
+ * characterization data
+ */
+#define NVRM_CPU1_ON_MIN_KHZ (0)
+#define NVRM_CPU1_OFF_MAX_KHZ (0)
+
+/// Default low corners for core and dedicated CPU voltages
+#define NVRM_AP20_LOW_CORE_MV (1200)
+#define NVRM_AP20_LOW_CPU_MV (850)
+
+/*****************************************************************************/
+
+/**
+ * Initializes activity monitors within the DFS module. Only activity
+ * monitors are affected. The rest of module's h/w is preserved.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure.
+ */
+NvError NvRmPrivAp20EmcMonitorsInit(NvRmDfs* pDfs);
+
+/**
+ * Deinitializes activity monitors within the DFS module. Only activity
+ * monitors are affected. The rest of module's h/w is preserved.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ */
+void NvRmPrivAp20EmcMonitorsDeinit(NvRmDfs* pDfs);
+
+/**
+ * Starts activity monitors in the DFS module for the next sample interval.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ * @param pDfsKHz - A pointer to current DFS clock frequencies structure.
+ * @param IntervalMs Next sampling interval in ms.
+ */
+void
+NvRmPrivAp20EmcMonitorsStart(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs);
+
+/**
+ * Reads idle count from activity monitors in the DFS module. The monitors are
+ * stopped.
+ *
+ * @param pDfs - A pointer to DFS structure.
+ * @param pDfsKHz - A pointer to current DFS clock frequencies structure.
+ * @param pIdleData - A pointer to idle cycles structure to be filled in with
+ * data read from the monitor.
+ *
+ */
+void
+NvRmPrivAp20EmcMonitorsRead(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+
+/**
+ * Changes core and rtc voltages, keeping them in synch
+ *
+ * @param hRm The RM device handle.
+ * @param pDvs A pointer to DVS structure.
+ * @param TargetMv Requested core/rtc voltage in mV.
+ *
+ */
+void
+NvRmPrivAp20DvsChangeCoreVoltage(
+ NvRmDeviceHandle hRm,
+ NvRmDvs* pDvs,
+ NvRmMilliVolts TargetMv);
+
+/**
+ * Determines temperature monitoring policy.
+ *
+ * @param TemperatureC Current core temperature in degrees C.
+ * @param pDtt A pointer to dynamic thermal throttling structure.
+ * @param pLowLimit A pointer to the returned variable with low boundary for
+ * temperature out-of-limit interrupt.
+ * @param pHighLimit A pointer to the returned variable with high boundary for
+ * temperature out-of-limit interrupt.
+ * @param pPollMs A pointer to the returned variable with temperature polling
+ * interval in milliseconds.
+ */
+
+void
+NvRmPrivAp20DttGetTcorePolicy(
+ NvS32 TemperatureC,
+ const NvRmDtt* pDtt,
+ NvS32* pLowLimit,
+ NvS32* pHighLimit,
+ NvU32* pPollMs);
+
+/**
+ * Throttles DFS target clocks.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param TemperatureC Current core temperature in degrees C.
+ * @param pCurrentKHz A pointer to current DFS clock frequencies structure.
+ * @param pDfsKHz A pointer to DFS clock structure with target frequencies
+ * on entry, and throttled frequencies on exit.
+ *
+ * @return NV_TRUE if target clocks were throttled, and NV_FALSE otherwise.
+ */
+NvBool
+NvRmPrivAp20DttClockUpdate(
+ NvRmDeviceHandle hRmDevice,
+ NvS32 TemperatureC,
+ const NvRmDfsFrequencies* pCurrentKHz,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/**
+ * Determines PM request to change CPU(s) power state.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCpuSampler Pointer to the DFS CPU clock sampling records
+ * @param CpuKHz CPU clock frequency target
+ *
+ * @return New PM request to change CPU power state
+ */
+NvRmPmRequest
+NvRmPrivAp20GetPmRequest(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsSampler* pCpuSampler,
+ NvRmFreqKHz CpuKHz);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_AP20RM_POWER_DFS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_reloctable.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_reloctable.c
new file mode 100644
index 000000000000..75c5004b65a6
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_reloctable.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_init.h"
+#include "common/nvrm_hwintf.h"
+#include "ap20/project_relocation_table.h"
+#include "ap15/ap15rm_private.h"
+
+static NvU32 s_RelocationTable[] =
+{
+ NV_RELOCATION_TABLE_INIT
+};
+
+NvU32 *
+NvRmPrivAp20GetRelocationTable( NvRmDeviceHandle hDevice )
+{
+ return s_RelocationTable;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/Makefile b/arch/arm/mach-tegra/nvrm/core/common/Makefile
new file mode 100644
index 000000000000..f51b86beb419
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/Makefile
@@ -0,0 +1,27 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += nvrm_pinmux.o
+obj-y += nvrm_heap_simple.o
+obj-y += nvrm_memmgr.o
+obj-y += nvrm_heap_carveout.o
+obj-y += nvrm_heap_iram.o
+obj-y += nvrm_keylist.o
+obj-y += nvrm_configuration.o
+obj-y += nvrm_pmu.o
+obj-y += nvrm_module.o
+obj-y += nvrm_module_common.o
+obj-y += nvrm_hwintf.o
+obj-y += nvrm_chiplib.o
+obj-y += nvrm_clocks_limits.o
+obj-y += nvrm_clocks_limits_stub.o
+obj-y += nvrm_power.o
+obj-y += nvrm_power_dfs.o
+obj-y += nvrm_rmctrace.o
+obj-y += nvrm_relocation_table.o
diff --git a/arch/arm/mach-tegra/nvrm/core/common/chiplib_interface.h b/arch/arm/mach-tegra/nvrm/core/common/chiplib_interface.h
new file mode 100644
index 000000000000..9b27133fd7d2
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/chiplib_interface.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_CHIPLIB_INTERFACE_H
+#define INCLUDED_CHIPLIB_INTERFACE_H
+
+#include "nvcommon.h"
+
+// IIfaceObject and bootstrapping logic
+typedef enum
+{
+ IID_QUERY_IFACE = 0,
+ IID_CHIP_IFACE = 1,
+ IID_INTERRUPT_IFACE = 8,
+ IID_BUSMEM_IFACE = 16,
+ IID_LAST_IFACE = 0xFFFF
+} IID_TYPE;
+
+struct IIfaceObjectRec;
+
+typedef struct IIfaceObjectVtableRec
+{
+ void *Unused1;
+ void *Unused2;
+
+ // IIfaceObject interface
+ void (*AddRef)(struct IIfaceObjectRec *pThis);
+ void (*Release)(struct IIfaceObjectRec *pThis);
+ struct IIfaceObjectRec *(*QueryIface)(struct IIfaceObjectRec *pThis,
+ IID_TYPE id);
+} IIfaceObjectVtable;
+
+typedef struct IIfaceObjectRec
+{
+ IIfaceObjectVtable *pVtable;
+} IIfaceObject;
+
+typedef IIfaceObject *(*QueryIfaceFn)(IID_TYPE id);
+#define QUERY_PROC_NAME "QueryIface"
+
+// IChip
+typedef enum
+{
+ ELEVEL_UNKNOWN = 0,
+ ELEVEL_HW = 1,
+ ELEVEL_RTL = 2,
+ ELEVEL_CMODEL = 3
+} ELEVEL;
+
+struct IChipRec;
+
+typedef struct IChipVtableRec
+{
+ void *Unused1;
+ void *Unused2;
+
+ // IIfaceObject interface
+ void (*AddRef)(struct IChipRec *pThis);
+ void (*Release)(struct IChipRec *pThis);
+ IIfaceObject *(*QueryIface)(struct IChipRec *pThis, IID_TYPE id);
+
+ void *Unused3;
+
+ // IChip interface
+ int (*Startup)(struct IChipRec *pThis, IIfaceObject* system, char** argv,
+ int argc);
+ void (*Shutdown)(struct IChipRec *pThis);
+ int (*AllocSysMem)(struct IChipRec *pThis, int numBytes, NvU32* physAddr);
+ void (*FreeSysMem)(struct IChipRec *pThis, NvU32 physAddr);
+ void (*ClockSimulator)(struct IChipRec *pThis, NvS32 numClocks);
+ void (*Delay)(struct IChipRec *pThis, NvU32 numMicroSeconds);
+ int (*EscapeWrite)(struct IChipRec *pThis, char* path, NvU32 index,
+ NvU32 size, NvU32 value);
+ int (*EscapeRead)(struct IChipRec *pThis, char* path, NvU32 index,
+ NvU32 size, NvU32* value);
+ int (*FindPCIDevice)(struct IChipRec *pThis, NvU16 vendorId,
+ NvU16 deviceId, int index, NvU32* address);
+ int (*FindPCIClassCode)(struct IChipRec *pThis, NvU32 classCode, int index,
+ NvU32* address);
+ int (*GetSimulatorTime)(struct IChipRec *pThis, NvU64* simTime);
+ double (*GetSimulatorTimeUnitsNS)(struct IChipRec *pThis);
+ int (*GetPCIBaseAddress)(struct IChipRec *pThis, NvU32 cfgAddr, int index,
+ NvU32* pAddress, NvU32* pSize);
+ ELEVEL (*GetChipLevel)(struct IChipRec *pThis);
+} IChipVtable;
+
+typedef struct IChipRec
+{
+ IChipVtable *pVtable;
+} IChip;
+
+// IBusMem
+typedef enum
+{
+ BUSMEM_HANDLED = 0,
+ BUSMEM_NOTHANDLED = 1,
+} BusMemRet;
+
+struct IBusMemRec;
+
+typedef struct IBusMemVtableRec
+{
+ void *Unused1;
+ void *Unused2;
+
+ // IIfaceObject interface
+ void (*AddRef)(struct IBusMemRec *pThis);
+ void (*Release)(struct IBusMemRec *pThis);
+ IIfaceObject *(*QueryIface)(struct IBusMemRec *pThis, IID_TYPE id);
+
+ void *Unused3;
+
+ // IBusMem interface
+ BusMemRet (*BusMemWrBlk)(struct IBusMemRec *pThis, NvU64 address,
+ const void *appdata, NvU32 count);
+ BusMemRet (*BusMemRdBlk)(struct IBusMemRec *pThis, NvU64 address,
+ void *appdata, NvU32 count);
+ BusMemRet (*BusMemCpBlk)(struct IBusMemRec *pThis, NvU64 dest,
+ NvU64 source, NvU32 count);
+ BusMemRet (*BusMemSetBlk)(struct IBusMemRec *pThis, NvU64 address,
+ NvU32 size, void* data, NvU32 data_size);
+} IBusMemVtable;
+
+typedef struct IBusMemRec
+{
+ IBusMemVtable *pVtable;
+} IBusMem;
+
+struct IInterruptRec;
+
+typedef struct IInterruptVtableRec
+{
+ void *Unused1;
+ void *Unused2;
+
+ // IIfaceObject interface
+ void (*AddRef)(struct IInterruptRec *pThis);
+ void (*Release)(struct IInterruptRec *pThis);
+ IIfaceObject *(*QueryIface)(struct IInterruptRec *pThis, IID_TYPE id);
+
+ void *Unused3;
+
+ // IInterrupt interface
+ void (*HandleInterrupt)( struct IInterruptRec *pThis );
+
+} IInterruptVtable;
+
+typedef struct IInterruptRec
+{
+ IInterruptVtable *pVtable;
+} IInterrupt;
+
+#endif // INCLUDED_CHIPLIB_INTERFACE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_chipid.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chipid.h
new file mode 100644
index 000000000000..52742c0a287e
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chipid.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_CHIPID_H
+#define INCLUDED_NVRM_CHIPID_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/* Chip Id */
+typedef enum
+{
+ NvRmChipFamily_Gpu = 0,
+ NvRmChipFamily_Handheld = 1,
+ NvRmChipFamily_BrChips = 2,
+ NvRmChipFamily_Crush = 3,
+ NvRmChipFamily_Mcp = 4,
+ NvRmChipFamily_Ck = 5,
+ NvRmChipFamily_Vaio = 6,
+ NvRmChipFamily_HandheldSoc = 7,
+
+ NvRmChipFamily_Force32 = 0x7FFFFFFF,
+} NvRmChipFamily;
+
+typedef enum
+{
+ NvRmCaps_HasFalconInterruptController = 0,
+ NvRmCaps_Has128bitInterruptSerializer,
+ NvRmCaps_Num,
+ NvRmCaps_Force32 = 0x7FFFFFFF,
+} NvRmCaps;
+
+typedef struct NvRmChipIdRec
+{
+ NvU16 Id;
+ NvRmChipFamily Family;
+ NvU8 Major;
+ NvU8 Minor;
+ NvU16 SKU;
+
+ /* the following only apply for emulation -- Major will be 0 and
+ * Minor is either 0 for quickturn or 1 for fpga
+ */
+ NvU16 Netlist;
+ NvU16 Patch;
+
+ /* List of features and bug WARs */
+ NvU32 Flags[(NvRmCaps_Num+31)/32];
+} NvRmChipId;
+
+/* Cap-bit accessors.  All three use an unsigned 1U constant: shifting
+ * a signed 1 left by 31 is undefined behavior, and the test macro
+ * previously disagreed with the set/clear macros.
+ */
+#define NVRM_IS_CAP_SET(h, bit) (((h)->ChipId.Flags)[(bit) >> 5] & (1U << ((bit) & 31U)))
+#define NVRM_CAP_SET(h, bit) (((h)->ChipId.Flags)[(bit) >> 5] |= (1U << ((bit) & 31U)))
+#define NVRM_CAP_CLEAR(h, bit) (((h)->ChipId.Flags)[(bit) >> 5] &= ~(1U << ((bit) & 31U)))
+
+/**
+ * Gets the chip id.
+ *
+ * @param hDevice The RM instance
+ */
+NvRmChipId *
+NvRmPrivGetChipId( NvRmDeviceHandle hDevice );
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_CHIPID_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.c
new file mode 100644
index 000000000000..5df00be58843
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.c
@@ -0,0 +1,846 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "chiplib_interface.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_init.h"
+
+/**
+ * NOTE: newer versions of chiplib (t30) use GCC 4, which changes the virtual
+ * table layout. To get around this, C wrapper functions were added, so this
+ * needs to check for 'QueryIface_C', which will return a struct full of
+ * C function wrappers rather than the old method, which is to overlay a C
+ * structure manually.
+ */
+
+/* table for virtual to physical lookups */
+typedef struct RmAddrMap_t
+{
+ NvRmPhysAddr phys;
+ void *virt;
+ size_t size;
+} RmAddrMap;
+
+#define RM_ADDR_MAP_SIZE 256
+
+static RmAddrMap s_AddrMap[ RM_ADDR_MAP_SIZE ];
+static NvBool s_Shutdown;
+
+static NvOsLibraryHandle s_chiplib = 0;
+static IChip *s_IChip = 0;
+static IBusMem *s_IBusMem = 0;
+static NvOsMutexHandle s_ChiplibMutex = 0;
+
+static NvOsThreadHandle s_clockThreadId = NULL;
+static volatile NvBool s_bShutdownClockThread = NV_FALSE;
+
+static NvOsMutexHandle s_simIstMutex;
+
+/**
+ * IInterrupt support
+ */
+/* Interrupt interface object handed to chiplib.  AddRef/Release are
+ * no-ops because the object has static storage duration and is never
+ * heap-managed.
+ */
+static IInterrupt s_Interrupt;
+static void AddRef_IInterrupt(struct IInterruptRec *pThis) { }
+static void Release_IInterrupt(struct IInterruptRec *pThis) { }
+
+static void
+NvRmPrivChiplibInterruptHandler( void );
+
+/* COM-style interface query: this object implements only IInterrupt,
+ * so every other interface id returns NULL.
+ */
+static IIfaceObject *
+QueryIface_IInterrupt(struct IInterruptRec *pThis,
+ IID_TYPE id)
+{
+ IIfaceObject *ret;
+
+ switch (id) {
+ case IID_INTERRUPT_IFACE:
+ ret = (IIfaceObject *)&s_Interrupt;
+ break;
+ case IID_CHIP_IFACE:
+ // fall through
+ case IID_BUSMEM_IFACE:
+ // fall through
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * Note about deadlock: the interrupt handler must not be called with
+ * s_ChiplibMutex locked, otherwise there are lock ordering issues. The
+ * best solution is to force cooperative threading. The second best is to
+ * use an indirection thread to actually execute the handler.
+ */
+
+static ChiplibHandleInterrupt s_HandleInterrupt;
+static NvOsSemaphoreHandle s_IsrSemaphore;
+static NvOsThreadHandle s_IsrThread;
+
+/* ISR indirection thread: waits for HandleInterrupt_IInterrupt to
+ * signal s_IsrSemaphore, then runs the registered handler without
+ * s_ChiplibMutex held (see the deadlock note above).  Exits once
+ * s_Shutdown is set or s_IChip has been torn down.
+ */
+static void
+NvRmPrivChiplibInterruptThread( void *args )
+{
+ for( ;; )
+ {
+ NvOsSemaphoreWait( s_IsrSemaphore );
+ if( s_Shutdown )
+ {
+ break;
+ }
+
+ if( !s_IChip )
+ {
+ break;
+ }
+
+ if( s_HandleInterrupt )
+ {
+ s_HandleInterrupt();
+ }
+ }
+}
+
+/* Called by chiplib when a simulated interrupt fires; only wakes the
+ * indirection thread so the real handler runs without chiplib's lock.
+ */
+static void
+HandleInterrupt_IInterrupt(struct IInterruptRec *pThis)
+{
+ if( s_IsrThread && s_IsrSemaphore )
+ {
+ NvOsSemaphoreSignal( s_IsrSemaphore );
+ }
+}
+
+/* Background thread that keeps the simulator clock ticking.  Runs at
+ * low priority when the OS supports it; otherwise it ticks larger
+ * batches of clocks and sleeps between batches to avoid starving
+ * real work.  Terminates when s_bShutdownClockThread is set.
+ */
+static void
+NvRmPrivChiplibClockthread( void *args )
+{
+ NvError err;
+ NvBool bSleep = NV_FALSE;
+
+ err = NvOsThreadSetLowPriority();
+ if( err != NvSuccess )
+ {
+ bSleep = NV_TRUE;
+ }
+
+ while( s_bShutdownClockThread == NV_FALSE )
+ {
+ if( s_IChip )
+ {
+ NvS32 clocks;
+ if( bSleep )
+ {
+ clocks = 500;
+ } else
+ {
+ clocks = 32;
+ }
+
+ NV_ASSERT(s_ChiplibMutex);
+ NvOsMutexLock(s_ChiplibMutex);
+ s_IChip->pVtable->ClockSimulator(s_IChip, clocks);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+
+ /* thread package might not support low priority threads, emulate it.
+ */
+ if( bSleep )
+ {
+ NvOsSleepMS( 50 );
+ }
+ else
+ {
+ NvOsThreadYield();
+ }
+ }
+}
+
+static NvBool
+NvRmPrivParseCommandline(const char *cmdline, int *argc, char ***argv,
+ char ***argvbuf, char **pCopy);
+
+#if NV_DEF_ENVIRONMENT_SUPPORTS_SIM
+NvBool
+NvRmIsSimulation(void)
+{
+ return (NvBool)(s_chiplib != NULL);
+}
+#endif
+
+typedef void *(*QueryIfaceCFn)( IID_TYPE id );
+
+/**
+ * Loads and starts the chiplib simulator library.
+ *
+ * @param lib Shared library name; an empty string means "no chiplib"
+ *     and is treated as success.
+ * @param cmdline Command line tokenized and handed to chiplib Startup().
+ * @param handler Interrupt handler run from the ISR indirection thread;
+ *     when NULL the default NvRmPrivChiplibInterruptHandler is used.
+ *
+ * @retval NvSuccess on success (or when no chiplib is configured).
+ * @retval NvError_RmInitFailed on any failure; partially created
+ *     resources are released before returning.
+ */
+NvError
+NvRmPrivChiplibStartup(const char *lib, const char *cmdline,
+    ChiplibHandleInterrupt handler)
+{
+    NvError err;
+    void *sym;
+    QueryIfaceFn query;
+    QueryIfaceCFn c_wrap;
+    char *copy = 0;
+    char **argvbuf = 0;
+    char **argv = 0;
+    int argc = 0;
+    int e;
+
+    NV_ASSERT(lib);
+
+    if( lib[0] == 0 )
+    {
+        /* no chiplib defined */
+        return NvSuccess;
+    }
+
+    /* all chiplib accesses must be synchronized - do not use a multi-process
+     * mutex since that prevents other simulation instances on the same
+     * machine.
+     */
+    err = NvOsMutexCreate( &s_ChiplibMutex );
+    if (err != NvSuccess)
+    {
+        goto fail;
+    }
+
+    err = NvOsSemaphoreCreate( &s_IsrSemaphore, 0 );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    s_Shutdown = NV_FALSE;
+    err = NvOsThreadCreate( NvRmPrivChiplibInterruptThread, 0, &s_IsrThread );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* open the chiplib .so */
+    err = NvOsLibraryLoad( lib, &s_chiplib );
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    /* try to find the C wrapper struct, if not, fall back to the old way
+     * of doing things (see the GCC 4 vtable note at the top of this file).
+     */
+    c_wrap = NvOsLibraryGetSymbol( s_chiplib, "QueryIface_C" );
+    if( c_wrap )
+    {
+        s_IChip = (IChip *)c_wrap( IID_CHIP_IFACE );
+    }
+    else
+    {
+        /* get a chiplib instance - QUERY_PROC_NAME, etc., are from chiplib
+         * headers.
+         */
+        sym = NvOsLibraryGetSymbol( s_chiplib, QUERY_PROC_NAME );
+        if( sym == NULL )
+        {
+            goto fail;
+        }
+
+        query = (QueryIfaceFn)sym;
+        s_IChip = (IChip *)query( IID_CHIP_IFACE );
+    }
+    if( !s_IChip )
+    {
+        goto fail;
+    }
+
+    // FIXME: should probably check for errors
+    (void)NvRmPrivParseCommandline(cmdline, &argc, &argv, &argvbuf, &copy);
+
+    /* setup the interrupt handler vtable handed to chiplib */
+    s_Interrupt.pVtable = NvOsAlloc(sizeof(IInterruptVtable));
+    if( !s_Interrupt.pVtable )
+    {
+        goto fail;
+    }
+    s_Interrupt.pVtable->AddRef = AddRef_IInterrupt;
+    s_Interrupt.pVtable->Release = Release_IInterrupt;
+    s_Interrupt.pVtable->QueryIface = QueryIface_IInterrupt;
+    s_Interrupt.pVtable->HandleInterrupt = HandleInterrupt_IInterrupt;
+
+    /* Use the caller's handler; fall back to the default when NULL.
+     * (Previously a non-NULL handler was silently ignored and the
+     * default was only installed for the NULL case.)
+     */
+    s_HandleInterrupt = (handler != NULL) ?
+        handler : NvRmPrivChiplibInterruptHandler;
+
+    /* start chiplib */
+    e = s_IChip->pVtable->Startup(s_IChip, (IIfaceObject *)&s_Interrupt,
+        argv, argc );
+    if( e )
+    {
+        goto fail;
+    }
+
+    /* get the bus interface */
+    s_IBusMem = (IBusMem *)s_IChip->pVtable->QueryIface( s_IChip,
+        IID_BUSMEM_IFACE );
+    if( !s_IBusMem )
+    {
+        goto fail;
+    }
+
+    if( NvRmIsSimulation() )
+    {
+        s_bShutdownClockThread = NV_FALSE;
+
+        err = NvOsMutexCreate( &s_simIstMutex );
+        if( err != NvSuccess )
+        {
+            goto fail;
+        }
+
+        err = NvOsThreadCreate(NvRmPrivChiplibClockthread, NULL,
+            &s_clockThreadId);
+        if (err != NvSuccess)
+        {
+            goto fail;
+        }
+    }
+
+    NvOsFree( copy );
+    NvOsFree( argvbuf );
+    return NvSuccess;
+
+fail:
+    NvOsFree( copy );
+    NvOsFree( argvbuf );
+
+    /* Stop the ISR thread before tearing anything else down.  Set
+     * s_Shutdown explicitly so the thread exits even while s_IChip is
+     * still non-NULL (the old code relied on s_IChip being zeroed
+     * first).
+     */
+    if( s_IsrSemaphore && s_IsrThread )
+    {
+        s_Shutdown = NV_TRUE;
+        NvOsSemaphoreSignal( s_IsrSemaphore );
+        NvOsThreadJoin( s_IsrThread );
+        s_Shutdown = NV_FALSE;
+    }
+    NvOsSemaphoreDestroy( s_IsrSemaphore );
+    s_IsrSemaphore = 0;
+    s_IsrThread = 0;
+
+    if( s_simIstMutex )
+    {
+        NvOsMutexDestroy( s_simIstMutex );
+        s_simIstMutex = 0;
+    }
+
+    /* free the interrupt vtable (was leaked on failure) */
+    NvOsFree(s_Interrupt.pVtable);
+    s_Interrupt.pVtable = 0;
+    s_HandleInterrupt = 0;
+
+    NvOsMutexDestroy(s_ChiplibMutex);
+    s_ChiplibMutex = 0;
+    NvOsLibraryUnload(s_chiplib);
+    s_chiplib = 0;
+    s_IChip = 0;
+    s_IBusMem = 0;
+
+    return NvError_RmInitFailed;
+}
+
+/**
+ * Stops chiplib and releases everything created by
+ * NvRmPrivChiplibStartup: the ISR indirection thread, the clock
+ * thread, the chip/bus interfaces, and the synchronization objects.
+ */
+void
+NvRmPrivChiplibShutdown(void)
+{
+ /* First shut down the interrupt thread */
+ if( s_IsrSemaphore && s_IsrThread )
+ {
+ s_Shutdown = NV_TRUE;
+ NvOsSemaphoreSignal( s_IsrSemaphore );
+ NvOsThreadJoin( s_IsrThread );
+ s_Shutdown = NV_FALSE;
+ }
+ NvOsSemaphoreDestroy( s_IsrSemaphore );
+ s_IsrSemaphore = 0;
+
+ /* next shutdown the clocking thread */
+ if (NvRmIsSimulation())
+ {
+ s_bShutdownClockThread = NV_TRUE;
+ NvOsThreadJoin( s_clockThreadId );
+ NvOsMutexDestroy(s_simIstMutex);
+ s_simIstMutex = 0;
+ s_clockThreadId = 0;
+ }
+
+ /* Finally shutdown the chiplib */
+ NvOsMutexLock(s_ChiplibMutex);
+ if (s_IChip)
+ {
+ s_IChip->pVtable->Shutdown(s_IChip);
+ s_IChip->pVtable->Release(s_IChip);
+ s_IChip = NULL;
+ }
+ if (s_IBusMem)
+ {
+ s_IBusMem->pVtable->Release(s_IBusMem);
+ s_IBusMem = NULL;
+ }
+
+ if (s_Interrupt.pVtable)
+ {
+ s_Interrupt.pVtable->Release(&s_Interrupt);
+ NvOsFree(s_Interrupt.pVtable);
+ }
+ /* NOTE(review): s_ChiplibMutex is destroyed while still locked (no
+  * matching unlock) -- verify NvOsMutexDestroy tolerates a held mutex.
+  */
+ NvOsMutexDestroy(s_ChiplibMutex);
+}
+
+/**
+ * Splits a chiplib command line into an argv-style vector.
+ *
+ * The vector is either a static buffer (small command lines) or a heap
+ * allocation returned through pArgv; all tokens point into a single
+ * heap copy of cmdline returned through pCopy.  The caller owns both
+ * out-parameters and must NvOsFree() them (NULL is harmless).
+ *
+ * @param cmdline Raw command line; tokens separated by single spaces
+ * @param argc Out: number of entries in *argv, including argv[0]
+ * @param argv Out: NULL-terminated argument vector
+ * @param pArgv Out: heap allocation backing *argv, if any
+ * @param pCopy Out: heap copy of cmdline the tokens point into
+ *
+ * @return NV_TRUE on success; NV_FALSE on empty input or allocation
+ *     failure.
+ */
+static NvBool
+NvRmPrivParseCommandline( const char *cmdline, int *argc, char ***argv,
+    char ***pArgv, char **pCopy )
+{
+    /* keep some amount of stack space to prevent dynamic allocation in the
+     * average case.
+     */
+    #define TOKEN_SIZE_GUESS 16
+
+    static char *s_argv[ TOKEN_SIZE_GUESS ];
+    char *env = 0;
+    char *start = 0;
+    char *end = 0;
+    char *copy = 0;
+    NvU32 size;
+    NvU32 len;
+    NvU32 index;
+
+    /* get the command line */
+    env = (char *)cmdline;
+
+    /*
+     * this needs to do two passes over the environment string. can't think
+     * of a way to do it with one pass without using realloc. performance
+     * should be ok either way and doesn't really matter anyway.
+     *
+     * just allocate one copy of the env string, then replace the spaces
+     * with nulls, assign the tokens into argv - this avoids strcpy and
+     * an allocation per token.
+     */
+
+    /* count the number of tokens and env string length */
+    size = 1; /* (should) always be at least one token */
+    len = 0;
+    start = env;
+    while (*start)
+    {
+        if (*start == ' ')
+        {
+            size++;
+        }
+
+        start++;
+        len++;
+    }
+
+    if (len == 0)
+    {
+        return NV_FALSE;
+    }
+
+    /* allocate argv */
+    size++; /* executable name */
+    size++; /* null terminate array */
+    if (size >= TOKEN_SIZE_GUESS)
+    {
+        *argv = NvOsAlloc(size * sizeof(char *));
+        if (!(*argv))
+        {
+            return NV_FALSE;
+        }
+        *pArgv = *argv;
+    }
+    else
+    {
+        /* guess that most argv arrays are TOKEN_SIZE_GUESS or less long */
+        *argv = s_argv;
+    }
+
+    /* assign argc */
+    *argc = size - 1; /* don't include null termination */
+
+    /* NULL-terminate the vector.  The allocation holds exactly `size`
+     * pointers, so the terminator belongs at index size - 1; the old
+     * code wrote at [size], one pointer past the end of the heap buffer.
+     */
+    (*argv)[ size - 1 ] = 0;
+    // FIXME: should get the executable name
+    (*argv)[ 0 ] = "bogus"; /* executable name */
+
+    /* allocate and copy the string */
+    len++;
+    copy = NvOsAlloc(len);
+    if (copy == 0)
+    {
+        goto fail;
+    }
+
+    *pCopy = copy;
+
+    NvOsStrncpy(copy, env, len - 1);
+    copy[ len - 1 ] = 0;
+
+    /* fill argv - find each token - assign to argv */
+    index = 1;
+    start = copy;
+    while (*start)
+    {
+        /* find a token */
+        end = start;
+        while (*end && *end != ' ')
+        {
+            end++;
+        }
+
+        /* assign to argv */
+        (*argv)[ index ] = start;
+        index++;
+
+        start = end;
+        if (*end == ' ')
+        {
+            /* replace space with null and move to next token */
+            *end = 0;
+            start++;
+        }
+    }
+
+    return NV_TRUE;
+
+fail:
+    NvOsFree(*pArgv);
+    *pArgv = 0;
+    NvOsFree(copy);
+    *pCopy = 0;
+
+    #undef TOKEN_SIZE_GUESS
+
+    return NV_FALSE;
+}
+
+/**
+ * Maps a physical address range and records it in s_AddrMap so the
+ * NvRead/NvWrite accessors can redirect accesses to chiplib's bus.
+ *
+ * @param addr The physical address to map
+ * @param size The size of the mapping in bytes
+ *
+ * @return The virtual address, or NULL on failure (mapping failed or
+ *     all RM_ADDR_MAP_SIZE slots are in use).
+ */
+void *
+NvRmPrivChiplibMap(NvRmPhysAddr addr, size_t size)
+{
+ NvError err;
+ void *virt;
+ NvU32 i;
+ RmAddrMap *map = 0;
+
+ /* map some bogus memory with guard page */
+ /* NOTE(review): the +4096 guard assumes 4KB pages -- confirm */
+ err = NvOsPhysicalMemMap(addr, size + 4096, NvOsMemAttribute_WriteBack,
+ NVOS_MEM_NONE, &virt);
+ if (err != NvSuccess)
+ {
+ return 0;
+ }
+
+ /* find a free entry */
+ for (i = 0; i < RM_ADDR_MAP_SIZE; i++)
+ {
+ if (s_AddrMap[i].phys == 0 &&
+ s_AddrMap[i].virt == 0)
+ {
+ map = &s_AddrMap[i];
+ break;
+ }
+ }
+
+ if (!map)
+ {
+ NvOsPhysicalMemUnmap(virt, size + 4096);
+ return 0;
+ }
+
+ /* setup entry */
+ map->phys = addr;
+ map->virt = virt;
+ map->size = size;
+
+ return virt;
+}
+
+/**
+ * Unmaps a pointer previously returned by NvRmPrivChiplibMap and
+ * releases its s_AddrMap slot.  NULL and unrecognized addresses are
+ * silently ignored.
+ *
+ * @param addr The virtual address to unmap
+ */
+void
+NvRmPrivChiplibUnmap(void *addr)
+{
+ NvU32 i;
+
+ if( !addr )
+ {
+ return;
+ }
+
+ for( i = 0; i < RM_ADDR_MAP_SIZE; i++ )
+ {
+ if( s_AddrMap[i].virt == addr )
+ {
+ /* unmap (don't forget the guard page) */
+ NvOsPhysicalMemUnmap(addr, s_AddrMap[i].size + 4096);
+ NvOsMemset(&s_AddrMap[i], 0, sizeof(s_AddrMap[i]));
+ break;
+ }
+ }
+}
+
+/**
+ * Translates a virtual address inside one of the chiplib mappings back
+ * to its physical address.
+ *
+ * @param virt The virtual address to look up
+ * @param phys Out: the corresponding physical address (only written on
+ *     success)
+ *
+ * @return NV_TRUE when virt falls inside a recorded mapping; NV_FALSE
+ *     otherwise (callers then treat the address as ordinary memory).
+ */
+static NvBool
+NvRmPrivVirtToPhys(const void *virt, NvRmPhysAddr *phys)
+{
+ NvU32 i;
+ RmAddrMap *map;
+ NvRmPhysAddr addr;
+ NvRmPhysAddr base;
+
+ /* NOTE(review): assumes a pointer fits in NvRmPhysAddr -- true for
+  * the 32-bit targets this code serves; confirm before a 64-bit port.
+  */
+ addr = (NvRmPhysAddr)virt;
+
+ /* find the address range and convert to a physical address, use
+ * physical address type just to be safe.
+ */
+ for( i = 0; i < RM_ADDR_MAP_SIZE; i++ )
+ {
+ map = &s_AddrMap[i];
+ base = (NvRmPhysAddr)map->virt;
+ if( addr >= base && addr < (base + map->size) )
+ {
+ *phys = addr - base + map->phys;
+ return NV_TRUE;
+ }
+ }
+
+ return NV_FALSE;
+}
+
+/* Writes one byte to addr.  If addr lies inside a chiplib mapping the
+ * write is forwarded over the simulated bus (serialized by
+ * s_ChiplibMutex); otherwise it is a plain store.  The other
+ * NvWrite/NvRead accessors below follow the same pattern for their
+ * respective widths.
+ */
+void NvWrite08(void *addr, NvU8 data)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ *(NvU8 *)addr = data;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemWrBlk(s_IBusMem, phys, &data,
+ sizeof(data));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+void NvWrite16(void *addr, NvU16 data)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ *(NvU16 *)addr = data;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemWrBlk(s_IBusMem, phys, &data,
+ sizeof(data));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+void NvWrite32(void *addr, NvU32 data)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ *(NvU32 *)addr = data;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemWrBlk(s_IBusMem, phys, &data,
+ sizeof(data));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+void NvWrite64(void *addr, NvU64 data)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ *(NvU64 *)addr = data;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemWrBlk(s_IBusMem, phys, &data,
+ sizeof(data));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+NvU8 NvRead08(void *addr)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+ NvU8 ret;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ ret = *(NvU8 *)addr;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemRdBlk(s_IBusMem, phys, &ret,
+ sizeof(ret));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+
+ return ret;
+}
+
+NvU16 NvRead16(void *addr)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+ NvU16 ret;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ ret = *(NvU16 *)addr;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemRdBlk(s_IBusMem, phys, &ret,
+ sizeof(ret));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+
+ return ret;
+}
+
+NvU32 NvRead32(void *addr)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+ NvU32 ret;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ ret = *(NvU32 *)addr;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemRdBlk(s_IBusMem, phys, &ret,
+ sizeof(ret));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+
+ return ret;
+}
+
+NvU64 NvRead64(void *addr)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+ NvU64 ret;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(addr, &phys))
+ {
+ ret = *(NvU64 *)addr;
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemRdBlk(s_IBusMem, phys, &ret,
+ sizeof(ret));
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+
+ return ret;
+}
+
+void NvWriteBlk(void *dst, const void *src, NvU32 length)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(dst, &phys))
+ {
+ NvOsMemcpy(dst, src, length);
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemWrBlk(s_IBusMem, phys, src, length);
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+void NvReadBlk(void *dst, const void *src, NvU32 length)
+{
+ BusMemRet err;
+ NvRmPhysAddr phys;
+
+ if (!s_IBusMem || !NvRmPrivVirtToPhys(src, &phys))
+ {
+ NvOsMemcpy(dst, src, length);
+ }
+ else
+ {
+ NvOsMutexLock(s_ChiplibMutex);
+ err = s_IBusMem->pVtable->BusMemRdBlk(s_IBusMem, phys, dst, length);
+ NV_ASSERT(err == BUSMEM_HANDLED);
+ NvOsMutexUnlock(s_ChiplibMutex);
+ }
+}
+
+extern void
+NvRmPrivHandleOsInterrupt( void *arg );
+
+/* Default chiplib interrupt handler: serializes simulated interrupt
+ * dispatch through s_simIstMutex and forwards to the common RM
+ * interrupt code.
+ */
+static void
+NvRmPrivChiplibInterruptHandler( void )
+{
+ if (NvRmIsSimulation())
+ {
+ NvOsMutexLock(s_simIstMutex);
+ }
+
+ /* Chiplib and AOS share the interrupt handling code.
+ * No chiplib interrupt support for wince and linux ARM port
+ */
+#if !NVCPU_IS_ARM
+ NvRmPrivHandleOsInterrupt(NULL);
+#endif
+
+ if (NvRmIsSimulation())
+ {
+ NvOsMutexUnlock(s_simIstMutex);
+ }
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.h
new file mode 100644
index 000000000000..b617e7b41c25
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_chiplib.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_CHIPLIB_H
+#define INCLUDED_NVRM_CHIPLIB_H
+
+#include "nvcommon.h"
+#include "nvrm_hardware_access.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/**
+ * Chiplib interrupt handler function.
+ */
+typedef void (* ChiplibHandleInterrupt)( void );
+
+#if NV_DEF_ENVIRONMENT_SUPPORTS_SIM == 1
+NvBool NvRmIsSimulation(void);
+#else
+#define NvRmIsSimulation() NV_FALSE
+#endif
+
+/**
+ * starts chiplib.
+ *
+ * @param lib The chiplib name
+ * @param cmdline The chiplib command line
+ * @param handler The interrupt handler - will be called by chiplib
+ */
+NvError
+NvRmPrivChiplibStartup( const char *lib, const char *cmdline,
+ ChiplibHandleInterrupt handler );
+
+/**
+ * stops chiplib.
+ */
+void
+NvRmPrivChiplibShutdown( void );
+
+/**
+ * maps a bogus virtual address to a physical address.
+ *
+ * @param addr The physical address to map
+ * @param size The size of the mapping
+ */
+void *
+NvRmPrivChiplibMap( NvRmPhysAddr addr, size_t size );
+
+/**
+ * unmaps a previously mapped pointer from NvRmPrivChiplibMap.
+ *
+ * @param addr The virtual address to unmap
+ */
+void
+NvRmPrivChiplibUnmap( void *addr );
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_CHIPLIB_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_clockids.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clockids.h
new file mode 100644
index 000000000000..79364f3f47eb
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clockids.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file ap15rm_clockids.h
+ Clock List & string names
+*/
+
+/* This is the list of all clock sources available on AP15 and AP20.
+ */
+
+// 32 KHz clock - A.K.A relaxation oscillator.
+NVRM_CLOCK_SOURCE('C', 'l', 'k', 'S', ' ', ' ', ' ', ' ', ClkS)
+// Main clock (crystal or input)
+NVRM_CLOCK_SOURCE('C', 'l', 'k', 'M', ' ', ' ', ' ', ' ', ClkM)
+// Always double the Clock M
+NVRM_CLOCK_SOURCE('C', 'l', 'k', 'D', ' ', ' ', ' ', ' ', ClkD)
+
+// PLL clocks
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'A', '0', ' ', ' ', ' ', PllA0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'A', '1', ' ', ' ', ' ', PllA1)
+
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'C', '0', ' ', ' ', ' ', PllC0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'C', '1', ' ', ' ', ' ', PllC1)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'D', '0', ' ', ' ', ' ', PllD0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'E', '0', ' ', ' ', ' ', PllE0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'M', '0', ' ', ' ', ' ', PllM0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'M', '1', ' ', ' ', ' ', PllM1)
+
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'P', '0', ' ', ' ', ' ', PllP0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'P', '1', ' ', ' ', ' ', PllP1)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'P', '2', ' ', ' ', ' ', PllP2)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'P', '3', ' ', ' ', ' ', PllP3)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'P', '4', ' ', ' ', ' ', PllP4)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'S', '0', ' ', ' ', ' ', PllS0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'U', '0', ' ', ' ', ' ', PllU0)
+NVRM_CLOCK_SOURCE('P', 'l', 'l', 'X', '0', ' ', ' ', ' ', PllX0)
+
+// External and recovered bit clock sources
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'S', 'p', 'd', 'f', ' ', ExtSpdf)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'I', '2', 's', '1', ' ', ExtI2s1)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'I', '2', 's', '2', ' ', ExtI2s2)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'A', 'c', '9', '7', ' ', ExtAc97)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'A', 'u', 'd', 'i', '1', ExtAudio1)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'A', 'u', 'd', 'i', '2', ExtAudio2)
+NVRM_CLOCK_SOURCE('E', 'x', 't', 'V', 'i', ' ', ' ', ' ', ExtVi)
+
+// Audio Clocks
+NVRM_CLOCK_SOURCE('A', 'u', 'd', 'i', 'S', 'y', 'n', 'c', AudioSync)
+NVRM_CLOCK_SOURCE('M', 'p', 'e', 'A', 'u', 'd', 'o', ' ', MpeAudio)
+
+// Internal bus sources
+NVRM_CLOCK_SOURCE('C', 'p', 'u', 'B', 'u', 's', ' ', ' ', CpuBus)
+NVRM_CLOCK_SOURCE('C', 'p', 'u', 'B', 'r', 'd', 'g', ' ', CpuBridge)
+NVRM_CLOCK_SOURCE('S', 'y', 's', 't', 'B', 'u', 's', ' ', SystemBus)
+NVRM_CLOCK_SOURCE('A', 'h', 'B', 'u', 's', ' ', ' ', ' ', Ahb)
+NVRM_CLOCK_SOURCE('A', 'p', 'B', 'u', 's', ' ', ' ', ' ', Apb)
+NVRM_CLOCK_SOURCE('V', 'd', 'e', 'B', 'u', 's', ' ', ' ', Vbus)
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks.h
new file mode 100644
index 000000000000..2f3cb8837c51
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks.h
@@ -0,0 +1,1356 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_CLOCKS_H
+#define INCLUDED_NVRM_CLOCKS_H
+
+#include "nvrm_clocks_limits_private.h"
+#include "nvrm_module.h"
+#include "nvrm_diag.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+#define NVRM_RESET_DELAY (10)
+#define NVRM_CLOCK_CHANGE_DELAY (2)
+#define NVRM_VARIABLE_DIVIDER ((NvU32)-1)
+
+// Fixed HDMI frequencies
+#define NVRM_HDMI_480_FIXED_FREQ_KHZ (27000)
+#define NVRM_HDMI_720p_1080i_FIXED_FREQ_KHZ (74250)
+#define NVRM_HDMI_720p_1080p_FIXED_FREQ_KHZ (148500)
+
+// BR-fixed PLLP output frequency in kHz (override disabled)
+#define NV_BOOT_PLLP_FIXED_FREQ_KHZ (432000)
+
+// RM-fixed PLLP output frequency in kHz (override enabled)
+#define NVRM_PLLP_FIXED_FREQ_KHZ (216000)
+
+// PLLP1-PLLP4 configurations set by RM during initialization and resume
+// from LP0 state. PLLP1 and PLLP3 settings are never changed. PLLP2 and
+// PLLP4 settings are overwritten according to SoC-specific DVFS policy.
+// PLLPx output frequency = NVRM_PLLP_FIXED_FREQ_KHZ / (1 + setting/2)
+#define NVRM_FIXED_PLLP1_SETTING (13)
+#define NVRM_FIXED_PLLP2_SETTING (7)
+#define NVRM_FIXED_PLLP3_SETTING (4)
+#define NVRM_FIXED_PLLP4_SETTING (2)
+
+/// Guaranteed MIPI PLL Stabilization Delay
+#define NVRM_PLL_MIPI_STABLE_DELAY_US (1000)
+
+/**
+ * MIPI PLL feedback divider N threshold for loop filter control setting:
+ * LFCON = 1 if N is above threshold, and LFCON = 0, otherwise
+ */
+#define NVRM_PLL_MIPI_LFCON_SELECT_N_DIVIDER (600)
+
+/**
+ * MIPI PLL feedback divider N thresholds for charge pump control setting
+ * selection.
+ */
+#define NVRM_PLL_MIPI_CPCON_SELECT_STEPS_N_DIVIDER \
+ 0, /* CPCON = 1 if feedback divider N = 0 (invalid setting)*/ \
+ 50, /* CPCON = 2 if feedback divider N <= 50 */ \
+ 175, /* CPCON = 3 if feedback divider N = ( 50 - 175] */ \
+ 300, /* CPCON = 4 if feedback divider N = (175 - 300] */ \
+ 375, /* CPCON = 5 if feedback divider N = (300 - 375] */ \
+ 450, /* CPCON = 6 if feedback divider N = (375 - 450] */ \
+ 525, /* CPCON = 7 if feedback divider N = (450 - 525] */ \
+ 600, /* CPCON = 8 if feedback divider N = (525 - 600] */ \
+ 700, /* CPCON = 9 if feedback divider N = (600 - 700] */ \
+ 800, /* CPCON = 10 if feedback divider N = (700 - 800] */ \
+ 900, /* CPCON = 11 if feedback divider N = (800 - 900] */ \
+ 1000 /* CPCON = 12 if feedback divider N = (900 - 1000] */
+ /* CPCON = 13 if feedback divider N > 1000 (invalid setting) */
+
+/// Guaranteed Low power PLL Stabilization Delay
+#define NVRM_PLL_LP_STABLE_DELAY_US (300)
+
+/**
+ * Low power PLL feedback divider N threshold for charge pump control. For N
+ * values below threshold charge pump control is always set to 1. For N values
+ * above threshold charge pump control setting depends on comparison frequency
+ * as specified in the table below.
+ */
+#define NVRM_PLL_LP_MIN_N_FOR_CPCON_SELECTION (200)
+
+/**
+ * Low power PLL comparison frequency Fcomp = Fin/M thresholds for charge pump
+ * control setting selection.
+ */
+#define NVRM_PLL_LP_CPCON_SELECT_STEPS_KHZ \
+ 6000, /* CPCON = 1 if Fin/M >= 6000 kHz (outside valid range)*/ \
+ 4000, /* CPCON = 2 if Fin/M = [4000 - 6000) kHz */ \
+ 3000, /* CPCON = 3 if Fin/M = [3000 - 4000) kHz */ \
+ 2000, /* CPCON = 4 if Fin/M = [2000 - 3000) kHz */ \
+ 1750, /* CPCON = 5 if Fin/M = [1750 - 2000) kHz */ \
+ 1500, /* CPCON = 6 if Fin/M = [1500 - 1750) kHz */ \
+ 1250, /* CPCON = 7 if Fin/M = [1250 - 1500) kHz */ \
+ 1000 /* CPCON = 8 if Fin/M = [1000 - 1250) kHz */
+ /* CPCON = 9 if Fin/M < 1000 kHz (outside valid range) */
+
+/// Combines PLL and PLL output divider settings for fixed pre-defined frequency
+typedef struct NvRmPllFixedConfigRec
+{
+ // Output pre-defined frequency
+ NvRmFreqKHz OutputKHz;
+
+    // Internal PLL dividers settings
+ NvU32 M;
+ NvU32 N;
+ NvU32 P;
+
+    // External output divider settings
+ // (ignored if there is no output divider)
+ NvU32 D;
+} NvRmPllFixedConfig;
+
+/**
+ * Defines list of supported PLLA configurations (2 entries for 11.2896
+ * frequency that can be either truncated or rounded to KHz). The reference
+ * frequency for PLLA is fixed at 28.8MHz, therefore there is no dependency on
+ * oscillator frequency. Output frequency is divided by PLLA_OUT0 fractional
+ * divider.
+ */
+#define NVRM_PLLA_CONFIGURATIONS \
+ { 11289, 25, 49, 0, 8}, \
+ { 11290, 25, 49, 0, 8}, \
+ { 12000, 24, 50, 0, 8}, \
+ { 12288, 25, 64, 0, 10}, \
+ { 56448, 25, 49, 0, 0}, \
+ { 73728, 25, 64, 0, 0}
+
+// Default audio sync frequency
+#define NVRM_AUDIO_SYNC_KHZ (11289)
+
+/**
+ * Defines PLLU configurations for different oscillator frequencies. Output
+ * frequency is 12MHz for USB with no ULPI support, or 60MHz if null ULPI is
+ * supported, or 480MHz for HS PLL. PLLU_OUT0 does not have output divider.
+ *
+ */
+#define NVRM_PLLU_AT_12MHZ { 12000, 12, 384, 5, 0}
+#define NVRM_PLLU_AT_13MHZ { 12000, 13, 384, 5, 0}
+#define NVRM_PLLU_AT_19MHZ { 12000, 4, 80, 5, 0}
+#define NVRM_PLLU_AT_26MHZ { 12000, 26, 384, 5, 0}
+
+#define NVRM_PLLU_ULPI_AT_12MHZ { 60000, 12, 240, 2, 0}
+#define NVRM_PLLU_ULPI_AT_13MHZ { 60000, 13, 240, 2, 0}
+#define NVRM_PLLU_ULPI_AT_19MHZ { 60000, 4, 50, 2, 0}
+#define NVRM_PLLU_ULPI_AT_26MHZ { 60000, 26, 240, 2, 0}
+
+#define NVRM_PLLU_HS_AT_12MHZ { 480000, 12, 960, 1, 0}
+#define NVRM_PLLU_HS_AT_13MHZ { 480000, 13, 960, 1, 0}
+#define NVRM_PLLU_HS_AT_19MHZ { 480000, 4, 200, 1, 0}
+#define NVRM_PLLU_HS_AT_26MHZ { 480000, 26, 960, 1, 0}
+
+/**
+ * Defines PLLP configurations for different oscillator frequencies. Output
+ * frequency is always the same. PLLP_OUT0 does not have output divider
+ *
+ */
+#define NVRM_PLLP_AT_12MHZ { NVRM_PLLP_FIXED_FREQ_KHZ, 12, 432, 1, 0}
+#define NVRM_PLLP_AT_13MHZ { NVRM_PLLP_FIXED_FREQ_KHZ, 13, 432, 1, 0}
+#define NVRM_PLLP_AT_19MHZ { NVRM_PLLP_FIXED_FREQ_KHZ, 4, 90, 1, 0}
+#define NVRM_PLLP_AT_26MHZ { NVRM_PLLP_FIXED_FREQ_KHZ, 26, 432, 1, 0}
+
+/**
+ * Defines PLLD/PLLC 720p/1080i HDMI configurations for different oscillator
+ * frequencies. For both PLLC and PLLD output frequency is fixed as 4 * 74250
+ * = 594000. However, PLLC_OUT0 will be running at this frequency exactly, while
+ * PLLD_OUT0 will be running at half frequency 297000 (h/w divide by 2 always).
+ * This difference in source frequency will be taken care of by Display and
+ * HDMI clock dividers.
+ */
+#define NVRM_PLLHD_AT_12MHZ { 594000, 12, 594, 0, 0}
+#define NVRM_PLLHD_AT_13MHZ { 594000, 13, 594, 0, 0}
+#define NVRM_PLLHD_AT_19MHZ { 594000, 16, 495, 0, 0}
+#define NVRM_PLLHD_AT_26MHZ { 594000, 26, 594, 0, 0}
+
+// Minimum PLLD_OUT0 frequency when used for display clocks
+#define NVRM_PLLD_DISPLAY_MIN_KHZ (50000)
+
+// Display divider is part of the display module and it is not described
+// in central module clock information table. Hence, need this define.
+#define NVRM_DISPLAY_DIVIDER_MAX (128)
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/*
+ * Defines module clock state
+ */
+typedef enum
+{
+ // Module clock disable
+ ModuleClockState_Disable = 0,
+
+ // Module clock enable
+ ModuleClockState_Enable = 1,
+
+ ModuleClockState_Force32 = 0x7FFFFFFF
+} ModuleClockState;
+
+
+typedef enum
+{
+ NvRmClockSource_Invalid = 0,
+#define NVRM_CLOCK_SOURCE(A, B, C, D, E, F, G, H, x) NvRmClockSource_##x,
+#include "nvrm_clockids.h"
+#undef NVRM_CLOCK_SOURCE
+ NvRmClockSource_Num,
+ NvRmClockSource_Force32 = 0x7FFFFFFF
+} NvRmClockSource;
+
+
+typedef enum
+{
+ // Clock source with fixed frequency (e.g., oscillator, not configurable
+ // PLL, external clock, etc.)
+ NvRmClockSourceType_Fixed = 1,
+
+ // Clock source from configurable PLL
+ NvRmClockSourceType_Pll,
+
+ // Secondary clock source derived from oscillator, PLL or other secondary
+ // source via clock divider
+ NvRmClockSourceType_Divider,
+
+ // Core clock source derived from several input sources via 2-stage selector
+ // and rational super-clock divider
+ NvRmClockSourceType_Core,
+
+ // Selector clock source derived from several input sources via 1-stage selector
+ // and optional clock frequency doubler
+ NvRmClockSourceType_Selector,
+
+ NvRmClockSourceType_Num,
+ NvRmClockSourceType_Force32 = 0x7FFFFFFF
+} NvRmClockSourceType;
+
+typedef enum
+{
+ // No divider
+ NvRmClockDivider_None = 1,
+
+ // Integer divider by N
+ NvRmClockDivider_Integer,
+
+ // Integer divider by (N + 1)
+ NvRmClockDivider_Integer_1,
+
+ // Fractional divider by (N/2 + 1)
+ NvRmClockDivider_Fractional_2,
+
+ // Skipping N clocks out of every 16, i.e fout = fin * (16-N)/16
+ // (= to Keeper16 with 1-complemented settings N = 15 - M)
+ NvRmClockDivider_Skipper16,
+
+ // Keep M+1 clocks out of every 16, fout = fin * (M+1)/16
+ // (= to Skipper16 with 1-complemented setting M = 15 - N)
+ NvRmClockDivider_Keeper16,
+
+ // Integer divider by (N + 2) = cascade Fractional : Fixed 1/2
+ NvRmClockDivider_Integer_2,
+
+ NvRmClockDivider_Num,
+ NvRmClockDivider_Force32 = 0x7FFFFFFF
+} NvRmClockDivider;
+
+typedef enum
+{
+ // AP10 PLLs (PLLC and PLLA)
+ NvRmPllType_AP10 = 1,
+
+ // MIPI PLLs (PLLD and PLLU on AP15)
+ NvRmPllType_MIPI,
+
+ // Low Power PLLs (PLLA, PLLC, PLLM, PLLP, PLLX, PLLS)
+ NvRmPllType_LP,
+
+ // AP20 USB HS PLL (PLLU on AP20)
+ NvRmPllType_UHS,
+
+ NvRmPllType_Num,
+ NvRmPllType_Force32 = 0x7FFFFFFF
+} NvRmPllType;
+
+/**
+ * Defines PLL configuration flags which are applicable for some PLLs.
+ * Multiple flags can be OR'ed and passed to the NvRmPrivAp15PllSet() API.
+ */
+typedef enum
+{
+ /// Use Slow Mode output for MIPI PLL
+ NvRmPllConfigFlags_SlowMode = 0x1,
+
+ /// Use Fast Mode output for MIPI PLL
+ NvRmPllConfigFlags_FastMode = 0x2,
+
+ /// Enable differential outputs for MIPI PLL
+ NvRmPllConfigFlags_DiffClkEnable = 0x4,
+
+ /// Disable differential outputs for MIPI PLL
+ NvRmPllConfigFlags_DiffClkDisable = 0x8,
+
+ /// Override fixed configuration for PLLP
+ NvRmPllConfigFlags_Override = 0x10,
+
+ /// Enable duty cycle correction for LP PLL
+ NvRmPllConfigFlags_DccEnable = 0x20,
+
+ /// Disable duty cycle correction for LP PLL
+ NvRmPllConfigFlags_DccDisable = 0x40,
+
+ NvRmPllConfigFlags_Num,
+ NvRmPllConfigFlags_Force32 = 0x7FFFFFFF
+} NvRmPllConfigFlags;
+
+/*****************************************************************************/
+
+// Holds source selection and divider configuration for module clock as well
+// as module reset information.
+typedef struct NvRmModuleClockInfoRec
+{
+ NvRmModuleID Module;
+ NvU32 Instance;
+ NvU32 SubClockId;
+
+ NvRmClockSource Sources[NvRmClockSource_Num];
+ NvRmClockDivider Divider;
+
+ NvU32 ClkSourceOffset;
+
+ NvU32 SourceFieldMask;
+ NvU32 SourceFieldShift;
+
+ NvU32 DivisorFieldMask;
+ NvU32 DivisorFieldShift;
+
+ NvU32 ClkEnableOffset;
+ NvU32 ClkEnableField;
+ NvU32 ClkResetOffset;
+ NvU32 ClkResetField;
+
+ NvRmDiagModuleID DiagModuleID;
+}NvRmModuleClockInfo;
+
+typedef struct NvRmModuleClockStateRec
+{
+ NvU32 Divider;
+ NvU32 SourceClock;
+ NvRmFreqKHz actual_freq;
+ NvU32 refCount;
+ NvU32 Vstep;
+ NvBool Vscale;
+#if NVRM_DIAG_LOCK_SUPPORTED
+ NvBool DiagLock; // once locked, can not be changed
+#endif
+} NvRmModuleClockState;
+
+/*****************************************************************************/
+
+// Holds configuration information about the fixed clock source that can be
+// only enabled/disabled (e.g, oscillator, external clock, fixed frequency PLL).
+typedef struct NvRmFixedClockInfoRec
+{
+ // Source ID
+ NvRmClockSource SourceId;
+
+ // Fixed source input (must be fixed source as well). For primary sources
+ // this field is set to NvRmClockSource_Invalid
+ NvRmClockSource InputId;
+
+ // Enable register offset and field
+ NvU32 ClkEnableOffset;
+ NvU32 ClkEnableField;
+} NvRmFixedClockInfo;
+
+
+// Holds configuration information about configurable PLL
+typedef struct NvRmPllClockInfoRec
+{
+ // PLL output ID
+ NvRmClockSource SourceId;
+
+ // PLL reference clock ID
+ NvRmClockSource InputId;
+
+ // PLL type
+ NvRmPllType PllType;
+
+    // Offsets of PLL registers
+ NvU32 PllBaseOffset;
+ NvU32 PllMiscOffset;
+
+ // PLL VCO range
+ NvRmFreqKHz PllVcoMin;
+ NvRmFreqKHz PllVcoMax;
+} NvRmPllClockInfo;
+
+
+// Holds configuration information about secondary clock source derived
+// from one input source via clock divider
+typedef struct NvRmDividerClockInfoRec
+{
+ // Divider output clock ID
+ NvRmClockSource SourceId;
+
+ // Divider input clock ID
+ NvRmClockSource InputId;
+
+ // Type of the divider
+ NvRmClockDivider Divider;
+
+ // Divider control register offset
+ NvU32 ClkControlOffset;
+
+ // Clock rate parameter field;
+ // ignored for divider with fixed setting
+ NvU32 ClkRateFieldMask;
+ NvU32 ClkRateFieldShift;
+
+ // Divider control field
+ NvU32 ClkControlField;
+ NvU32 ClkEnableSettings;
+ NvU32 ClkDisableSettings;
+
+ // Fixed divider rate parameter setting;
+ // NVRM_VARIABLE_DIVIDER if divider is variable
+ NvU32 FixedRateSetting;
+} NvRmDividerClockInfo;
+
+
+typedef enum
+{
+    // The enumeration values must not be changed for Mode(ModeField) formula
+ // below to work properly
+ NvRmCoreClockMode_Suspend = 0,
+ NvRmCoreClockMode_Idle = 1,
+ NvRmCoreClockMode_Run = 2,
+ NvRmCoreClockMode_Irq = 3,
+ NvRmCoreClockMode_Fiq = 4,
+
+ NvRmCoreClockMode_Num,
+ NvRmCoreClockMode_Force32 = 0x7FFFFFFF
+} NvRmCoreClockMode;
+
+// Holds configuration information about core clock source derived from several
+// input sources via 2-stage selector and rational super-clock divider
+typedef struct NvRmCoreClockInfoRec
+{
+ // Core clock ID
+ NvRmClockSource SourceId;
+
+ // Super clock input sources, same in each mode
+ NvRmClockSource Sources[NvRmClockSource_Num];
+
+ // Offset of the core clock input source selector register
+ NvU32 SelectorOffset;
+
+ // Clock mode field:
+ // 0 => NvRmCoreClockMode_Suspend (0)
+ // 1 => NvRmCoreClockMode_Idle (1)
+ // 2-3 => NvRmCoreClockMode_Run (2)
+ // 4-7 => NvRmCoreClockMode_Irq (3)
+ // 8-15 => NvRmCoreClockMode_Fiq (4)
+ // Mode = (ModeField == 0) ? NvRmCoreClockMode_Suspend : (1 + LOG2(ModeField))
+ NvU32 ModeFieldMask;
+ NvU32 ModeFieldShift;
+
+    // Source selection fields for each mode
+ NvU32 SourceFieldMasks[NvRmCoreClockMode_Num];
+ NvU32 SourceFieldShifts[NvRmCoreClockMode_Num];
+
+ // Offset of the divider register
+ NvU32 DividerOffset;
+
+ // Divider enable field (divider is by-passed if disabled)
+ // Fout = Fin * (Dividend + 1) / (Divisor + 1)
+ NvU32 DividerEnableFiledMask;
+ NvU32 DividerEnableFiledShift;
+
+ // Dividend field
+ NvU32 DividendFieldMask;
+ NvU32 DividendFieldShift;
+ NvU32 DividendFieldSize;
+
+ // Divisor field
+ NvU32 DivisorFieldMask;
+ NvU32 DivisorFieldShift;
+ NvU32 DivisorFieldSize;
+} NvRmCoreClockInfo;
+
+// Holds configuration information about secondary clock source derived from
+// several input sources via 1-stage selector and clock frequency doubler
+typedef struct NvRmSelectorClockInfoRec
+{
+ // Selector output clock ID
+ NvRmClockSource SourceId;
+
+ // Selector input sources
+ NvRmClockSource Sources[NvRmClockSource_Num];
+
+ // Offset of the input source selector register
+ NvU32 SelectorOffset;
+
+ // Source selection field
+ NvU32 SourceFieldMask;
+ NvU32 SourceFieldShift;
+
+ // Doubler control (optional - set field to 0, if no doubler)
+ NvU32 DoublerEnableOffset;
+ NvU32 DoublerEnableField;
+} NvRmSelectorClockInfo;
+
+// Holds information on system bus clock dividers
+typedef struct NvRmSystemBusComplexInfoRec
+{
+ // Offset of the Bus Rates control register
+ NvU32 BusRateOffset;
+
+ // Combined bus clocks disable fields (1 = disable)
+ NvU32 BusClockDisableFields;
+
+ // V-pipe vclk divider field: vclk rate = system core rate * (n+1) /16
+ // All fields are 0, if VDE (V-pipe) clock is decoupled from the System bus
+ NvU32 VclkDividendFieldMask;
+ NvU32 VclkDividendFieldShift;
+ NvU32 VclkDividendFieldSize;
+
+ // AHB hclk divider field: hclk rate = system core rate / (n+1)
+ NvU32 HclkDivisorFieldMask;
+ NvU32 HclkDivisorFieldShift;
+ NvU32 HclkDivisorFieldSize;
+
+ // APB pclk divider field: pclk rate = hclk rate / (n+1)
+ NvU32 PclkDivisorFieldMask;
+ NvU32 PclkDivisorFieldShift;
+ NvU32 PclkDivisorFieldSize;
+} NvRmSystemBusComplexInfo;
+
+/*****************************************************************************/
+
+typedef union
+{
+ NvRmFixedClockInfo* pFixed;
+ NvRmPllClockInfo* pPll;
+ NvRmDividerClockInfo* pDivider;
+ NvRmCoreClockInfo* pCore;
+ NvRmSelectorClockInfo* pSelector;
+} NvRmClockSourceInfoPtr;
+
+// Abstracts clock source information for different source types.
+typedef struct NvRmClockSourceInfoRec
+{
+ // Clock source ID
+ NvRmClockSource SourceId;
+
+ // Clock source type
+ NvRmClockSourceType SourceType;
+
+ // Pointer to clock source information
+ NvRmClockSourceInfoPtr pInfo;
+} NvRmClockSourceInfo;
+
+/*****************************************************************************/
+
+// Holds PLL references
+typedef struct NvRmPllReferenceRec
+{
+ // PLL ID
+ NvRmClockSource SourceId;
+
+ // Stop PLL during low power state flag (reported by DFS to kernel)
+ NvRmDfsStatusFlags StopFlag;
+
+ // Reference counter
+ NvU32 ReferenceCnt;
+
+ // Module clocks reference array
+ NvBool* AttachedModules;
+
+ // External clock attachment reference count (debugging only)
+ NvU32 ExternalClockRefCnt;
+} NvRmPllReference;
+
+/**
+ * Holds DFS clock source configuration record
+ */
+typedef struct NvRmDfsSourceRec
+{
+ // DFS Clock Source Id
+ NvRmClockSource SourceId;
+
+ // DFS Clock Source frequency
+ // CPU and System/AVP clock domains: this field holds input frequency
+ // of core super-divider (from base PLL output or secondary PLL divider)
+ // V-pipe domain (if it is decoupled from System bus): this field holds
+ // output frequency of VDE module divider = VDE domain frequency
+ // EMC domain: this field holds EMC2x frequency specified in selected
+ // entry in EMC configuration table
+ NvRmFreqKHz SourceKHz;
+
+ // DFS Clock Source divider setting
+ // CPU and System/AVP clock domains: this field holds settings for
+ // secondary PLL divider between base PLL output and super-divider
+ // V-pipe domain (if it is decoupled from System bus): this field holds
+ // settings for VDE module clock divider
+ // EMC domain: this field holds index into EMC configuration table
+ NvU32 DividerSetting;
+
+ // Minimum Voltage required to run DFS domain from this source
+ NvRmMilliVolts MinMv;
+} NvRmDfsSource;
+
+/**
+ * Combines frequencies for DFS controlled clock domains
+ */
+typedef struct NvRmDfsFrequenciesRec
+{
+ NvRmFreqKHz Domains[NvRmDfsClockId_Num];
+} NvRmDfsFrequencies;
+
+/*****************************************************************************/
+
+/*
+ * Defines execution platforms
+ */
+typedef enum
+{
+ // SoC Chip
+ ExecPlatform_Soc = 0x1,
+
+ // FPGA
+ ExecPlatform_Fpga,
+
+ // QuickTurn
+ ExecPlatform_Qt,
+
+ // Simulation
+ ExecPlatform_Sim,
+
+ ExecPlatform_Force32 = 0x7FFFFFFF
+} ExecPlatform;
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/**
+ * Determines execution platform.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ *
+ * @return Execution platform ID.
+ */
+ExecPlatform NvRmPrivGetExecPlatform(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Initializes clock sources frequencies.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pClockSourceFreq A pointer to the source frequencies table to be
+ * filled in by this function.
+ */
+void
+NvRmPrivClockSourceFreqInit(
+ NvRmDeviceHandle hRmDevice,
+ NvU32* pClockSourceFreq);
+
+/**
+ * Initializes bus clocks.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param SystemFreq The system bus frequency
+ */
+void
+NvRmPrivBusClockInit(NvRmDeviceHandle hRmDevice, NvRmFreqKHz SystemFreq);
+
+/**
+ * Initializes PLL power rails and synchronizes PMU ref count
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivPllRailsInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Set nominal core and DDR I/O voltages and boosts core and memory
+ * clocks to maximum.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivBoostClocks(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Enables/disables module clock (private utility directly accessing h/w,
+ * no ref counting).
+ *
+ * @param hDevice The RM device handle.
+ * @param ModuleId Combined module ID and instance of the target module.
+ * @param ClockState Target clock state.
+ */
+void
+NvRmPrivEnableModuleClock(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId,
+ ModuleClockState ClockState);
+
+/**
+ * Gets currently selected clock source for the specified core clock.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the core clock description structure.
+ *
+ * @return Core clock source ID.
+ */
+NvRmClockSource
+NvRmPrivCoreClockSourceGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmCoreClockInfo* pCinfo);
+
+/**
+ * Gets core clock frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the core clock description structure.
+ *
+ * @return Core clock frequency in kHz.
+ */
+NvRmFreqKHz
+NvRmPrivCoreClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmCoreClockInfo* pCinfo);
+
+/**
+ * Finds the selection index of the specified core clock source.
+ *
+ * @param pCinfo Pointer to the core clock description structure.
+ * @param SourceId Id of the clock source to find index of
+ * @param pSourceIndex Output storage pointer for the clock source index;
+ * returns NvRmClockSource_Num if specified source Id can not be found
+ * in the core clock descriptor.
+ */
+void
+NvRmPrivCoreClockSourceIndexFind(
+ const NvRmCoreClockInfo* pCinfo,
+ NvRmClockSource SourceId,
+ NvU32* pSourceIndex);
+
+/**
+ * Finds the best source for the target core clock frequency.
+ * The best source is a valid source with frequency above and closest
+ * to the target; if such source does not exist, the best source is a
+ * valid source below and closest to the target. If no valid source
+ * exists (i.e., all available source frequencies are above the maximum
+ * domain frequency), no source is selected.
+ *
+ * @param pCinfo Pointer to the core clock description structure.
+ * @param MaxFreq Upper limit for source frequency in kHz
+ * @param Target frequency in kHz
+ * @param pSourceFreq Output storage pointer for the best source frequency;
+ * returns 0 if no valid source below upper limit was found
+ * @param pSourceIndex Output storage pointer for the best source index in
+ * core clock descriptor; returns NvRmClockSource_Num if no valid source
+ * was found
+ */
+void
+NvRmPrivCoreClockBestSourceFind(
+ const NvRmCoreClockInfo* pCinfo,
+ NvRmFreqKHz MaxFreq,
+ NvRmFreqKHz TargetFreq,
+ NvRmFreqKHz* pSourceFreq,
+ NvU32* pSourceIndex);
+
+/**
+ * Sets "as is" specified core clock configuration.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the core clock description structure.
+ * @param SourceId The ID of the clock source to drive core clock.
+ * @param m Superdivider dividend value.
+ * @param n Superdivider divisor value.
+ *
+ * There is no error return status for this API call.
+ * If specified source can not be selected(not present
+ * in core clock descriptor), asserts are encountered.
+ */
+void
+NvRmPrivCoreClockSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmCoreClockInfo* pCinfo,
+ NvRmClockSource SourceId,
+ NvU32 m,
+ NvU32 n);
+
+/**
+ * Configures core clock frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the core clock description structure.
+ * @param MaxFreq Upper limit for clock source frequency in kHz.
+ * @param pFreq Pointer to the target frequency in kHz on entry; updated
+ * with actual clock frequencies on exit.
+ * @param pSourceId Pointer to the target clock source ID on entry; if set
+ * to NvRmClockSource_Num, no source target is specified, and the best source
+ * for the target frequency is selected automatically. On exit, points to the
+ * actually selected source ID.
+ *
+ * @retval NvSuccess if core clock was configured successfully.
+ * @retval NvError_NotSupported if the specified target source is invalid or
+ * no target source specified and no valid source was found.
+ */
+NvError
+NvRmPrivCoreClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmCoreClockInfo* pCinfo,
+ NvRmFreqKHz MaxFreq,
+ NvRmFreqKHz* pFreq,
+ NvRmClockSource* pSourceId);
+
+/**
+ * Gets bus clocks frequencies.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param SystemFreq System core clock frequency in kHz.
+ * @param pVclkFreq Output storage pointer for V-bus clock frequency in kHz.
+ * If VDE clock is decoupled from the System bus, 0kHz will be returned.
+ * @param pHclkFreq Output storage pointer for AHB clock frequency in kHz.
+ * @param pPclkFreq Output storage pointer for APB clock frequency in kHz.
+ */
+void
+NvRmPrivBusClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz SystemFreq,
+ NvRmFreqKHz* pVclkFreq,
+ NvRmFreqKHz* pHclkFreq,
+ NvRmFreqKHz* pPclkFreq);
+
+/**
+ * Configures bus clocks frequencies.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param SystemFreq System core clock frequency in kHz.
+ * @param pVclkFreq Pointer to the target V-bus clock frequency in kHz
+ * on entry, updated with actually set frequency on exit. If VDE clock
+ * is decoupled from the System bus, 0kHz will be returned.
+ * @param pHclkFreq Pointer to the target AHB clock frequency in kHz
+ * on entry, updated with actually set frequency on exit.
+ * @param pPclkFreq Pointer to the target APB clock frequency in kHz
+ * on entry, updated with actually set frequency on exit.
+ * @param PclkMaxFreq APB clock maximum frequency; APB is the only clock
+ * in the system complex that may have different (lower) maximum limit.
+ */
+void
+NvRmPrivBusClockFreqSet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz SystemFreq,
+ NvRmFreqKHz* pVclkFreq,
+ NvRmFreqKHz* pHclkFreq,
+ NvRmFreqKHz* pPclkFreq,
+ NvRmFreqKHz PclkMaxFreq);
+
+/**
+ * Reconfigures PLLX0 to specified frequency (and switches CPU to back-up
+ * PLLP0 if PLLX0 is currently used as CPU source).
+ *
+ * @param hRmDevice The RM device handle.
+ * @param TargetFreq New PLLX0 output frequency.
+ */
+void
+NvRmPrivReConfigurePllX(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz TargetFreq);
+
+/**
+ * Reconfigures PLLC0 to specified frequency (switches to PLLP0 all modules
+ * that use PLLC0 as a source, and then restores source configuration back).
+ * Should be called only when core voltage is set at nominal.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param TargetFreq New PLLC0 output frequency.
+ */
+void
+NvRmPrivReConfigurePllC(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz TargetFreq);
+
+/**
+ * Gets maximum PLLC0 frequency set as a default target, when there are no
+ * fixed frequency requirements.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Maximum target for PLLC0 frequency.
+ */
+NvRmFreqKHz NvRmPrivGetMaxFreqPllC(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Configures PLLC0 at maximum frequency, when there are no fixed frequency
+ * requirements. Should be called only when core voltage is set at nominal.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Maximum target for PLLC0 frequency.
+ */
+void NvRmPrivBoostPllC(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Updates PLL frequency entry in the clock source table.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the PLL description structure.
+ */
+void
+NvRmPrivPllFreqUpdate(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmPllClockInfo* pCinfo);
+
+/**
+ * Updates divider frequency entry in the clock source table.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the divider clock description structure.
+ */
+void
+NvRmPrivDividerFreqUpdate(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo);
+
+/**
+ * Sets "as is" the specified divider parameter.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the divider clock description structure.
+ * @param setting Divider setting
+ */
+void
+NvRmPrivDividerSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo,
+ NvU32 setting);
+
+/**
+ * Gets divider output frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the divider clock description structure.
+ *
+ * @return Divider output frequency in kHz; zero if divider itself or
+ * divider's input clock is disabled.
+ */
+NvRmFreqKHz
+NvRmPrivDividerFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDividerClockInfo* pCinfo);
+
+/**
+ * Finds minimum divider output frequency, which is above the specified
+ * target frequency.
+ *
+ * @param DividerType Divider type (only fractional dividers for now).
+ * @param pCinfo SourceKHz Divider source (input) frequency in kHz.
+ * @param MaxKHz Output divider frequency upper limit. Target frequency must
+ * be below this limit. If no frequency above the target but within the limit
+ * can be found, then maximum frequency within the limit is returned.
+ * @param pTargetKHz A pointer to the divider output frequency. On entry
+ * specifies target; on exit - found frequency.
+ *
+ * @return Divider setting to get found frequency from the given source.
+ */
+NvU32
+NvRmPrivFindFreqMinAbove(
+ NvRmClockDivider DividerType,
+ NvRmFreqKHz SourceKHz,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pTargetKHz);
+
+/**
+ * Finds maximum divider output frequency, which is below the specified
+ * target frequency.
+ *
+ * @param DividerType Divider type (only fractional dividers for now).
+ * @param pCinfo SourceKHz Divider source (input) frequency in kHz.
+ * @param MaxKHz Output divider frequency upper limit. Target frequency must
+ * be below this limit.
+ * @param pTargetKHz A pointer to the divider output frequency. On entry
+ * specifies target; on exit - found frequency.
+ *
+ * @return Divider setting to get found frequency from the given source.
+ */
+NvU32
+NvRmPrivFindFreqMaxBelow(
+ NvRmClockDivider DividerType,
+ NvRmFreqKHz SourceKHz,
+ NvRmFreqKHz MaxKHz,
+ NvRmFreqKHz* pTargetKHz);
+
+/**
+ * Sets "as is" the specified selector clock configuration.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the selector clock description structure.
+ * @param SourceId The ID of the input clock source to select.
+ * @param Double If true, enable output doubler. If false, disable
+ * output doubler.
+ *
+ * There is no error return status for this API call.
+ * If specified source can not be selected(not present
+ * in core clock descriptor), asserts are encountered.
+ */
+void
+NvRmPrivSelectorClockSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmSelectorClockInfo* pCinfo,
+ NvRmClockSource SourceId,
+ NvBool Double);
+
+/**
+ * Parses clock sources configuration table of the given type.
+ *
+ * @param pDst The pointer to the list of the clock source records the results
+ * of parsing are to be stored in. The records in this list are arranged in
+ * the order of source IDs.
+ * @param DestinationTableSize Maximum number of sources that can be recorded.
+ * @param Src The clock source configuration table to be parsed.
+ * @param SourceTableSize Number of records to be parsed.
+ * @param SourceType The type of source records to be parsed.
+ */
+void
+NvRmPrivParseClockSources(
+ NvRmClockSourceInfo* pDst,
+ NvU32 DestinationTableSize,
+ NvRmClockSourceInfoPtr Src,
+ NvU32 SourceTableSize,
+ NvRmClockSourceType SourceType);
+
+/**
+ * Gets pointer to the given clock source descriptor.
+ *
+ * @param id The targeted clock source ID.
+ *
+ * @return A pointer to the specified clock source descriptor.
+ * NULL is returned, if the target clock source is not valid.
+ */
+NvRmClockSourceInfo* NvRmPrivGetClockSourceHandle(NvRmClockSource id);
+
+/**
+ * Gets the given clock source frequency.
+ *
+ * @param id The targeted clock source ID.
+ *
+ * @return Clock source frequency in KHz.
+ */
+NvRmFreqKHz NvRmPrivGetClockSourceFreq(NvRmClockSource id);
+
+/**
+ * Verifies if the specified clock source is currently selected
+ * by the specified module.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param SourceId The clock source ID to be verified.
+ * @param ModuleId The combined module id and instance of the module in question.
+ *
+ * @return True if specified clock source is selected by the module;
+ * False returned, otherwise.
+ */
+NvBool
+NvRmPrivIsSourceSelectedByModule(
+ NvRmDeviceHandle hRmDevice,
+ NvRmClockSource SourceId,
+ NvRmModuleID ModuleId);
+
+/**
+ * Verifies if specified frequency range is reachable from the given
+ * clock source.
+ *
+ * @param SourceFreq Clock source frequency in KHz.
+ * @param MinFreq Frequency range low boundary in KHz.
+ * @param MaxFreq Frequency range high boundary in KHz.
+ * @param MaxDivisor Maximum possible source clock divisor.
+ *
+ * @return True, if whole divisor can be found so that divided source
+ * frequency is within the range boundaries; False, otherwise.
+ */
+NvBool
+NvRmIsFreqRangeReachable(
+ NvRmFreqKHz SourceFreq,
+ NvRmFreqKHz MinFreq,
+ NvRmFreqKHz MaxFreq,
+ NvU32 MaxDivisor);
+
+/**
+ * Reports if clock/voltage diagnostic is in progress for the specified module.
+ *
+ * @param ModuleId The combined module id and instance of the module in question.
+ * If set to NvRmModuleID_Invalid reports if diagnostic is in progress for any
+ * module.
+ *
+ * @return True, if clock/voltage diagnostic is in progress; False, otherwise.
+ */
+NvBool NvRmPrivIsDiagMode(NvRmModuleID ModuleId);
+
+/**
+ * Gets clock frequency limits for the specified SoC module.
+ *
+ * @param Module The targeted module ID.
+ *
+ * @return The pointer to the clock limits structure for the given module ID.
+ */
+const NvRmModuleClockLimits* NvRmPrivGetSocClockLimits(NvRmModuleID Module);
+
+/**
+ * Locks/unlocks access to the shared PLL.
+ */
+void NvRmPrivLockSharedPll(void);
+void NvRmPrivUnlockSharedPll(void);
+
+/**
+ * Enable/Disable the clock source for the module.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param ModuleId Module ID and instance information.
+ * @param enable NV_TRUE to enable the clock source; NV_FALSE to disable it.
+ */
+void
+NvRmPrivConfigureClockSource(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId,
+ NvBool enable);
+
+/**
+ * Gets pointers to clock descriptor and clock state for the given module.
+ *
+ * @param hDevice The RM device handle.
+ * @param ModuleId Module ID and instance information.
+ * @param CinfoOut A pointer to a variable that this function sets to the
+ * clock descriptor pointer.
+ * @param StateOut A pointer to a variable that this function sets to the
+ * clock state pointer.
+ *
+ * @retval NvSuccess if busy request completed successfully.
+ * @retval NvError_NotSupported if no clock descriptor for the given module.
+ * @retval NvError_ModuleNotPresent if the given module is not listed in
+ * relocation table.
+ */
+NvError
+NvRmPrivGetClockState(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvRmModuleClockInfo** CinfoOut,
+ NvRmModuleClockState** StateOut);
+
+/**
+ * Updates memory controller clock source reference counts.
+ *
+ * @param hDevice The RM device handle.
+ * @param pCinfo Pointer to the memory controller clock descriptor.
+ * @param pCstate Pointer to the memory controller clock state.
+ */
+void
+NvRmPrivMemoryClockReAttach(
+ NvRmDeviceHandle hDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ const NvRmModuleClockState* pCstate);
+
+/**
+ * Updates generic module clock source reference counts.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ * @param pCstate Pointer to the targeted module clock state.
+ */
+void
+NvRmPrivModuleClockReAttach(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* cinfo,
+ const NvRmModuleClockState* state);
+
+/**
+ * Updates external clock source references.
+ *
+ * @param hDevice The RM device handle.
+ * @param SourceId The external clock source ID.
+ * @param Enable NV_TRUE if external clock is enabled;
+ * NV_FALSE if external clock is disabled.
+ */
+void
+NvRmPrivExternalClockAttach(
+ NvRmDeviceHandle hDevice,
+ NvRmClockSource SourceId,
+ NvBool Enable);
+
+/**
+ * Updates PLL attachment reference count and PLL stop flag in the storage
+ * shared by RM and NV boot loader.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pPllRef Pointer to the PLL references record.
+ * @param Increment If NV_TRUE, increment PLL reference count,
+ * if NV_FALSE, decrement PLL reference count.
+ */
+void
+NvRmPrivPllRefUpdate(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPllReference* pPllRef,
+ NvBool Increment);
+
+/**
+ * Verifies if the targeted module is prohibited to use the specified clock
+ * source per clock manager policy.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module Target module ID.
+ * @param SourceId Clock source ID.
+ *
+ * @return NV_TRUE if the targeted module is prohibited to use the specified
+ * clock source; NV_FALSE if the targeted module can use the specified clock
+ * source.
+ */
+NvBool
+NvRmPrivIsSourceProtected(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmClockSource SourceId);
+
+/**
+ * Gets the maximum available clock source frequency for the specified module
+ * per clock manager policy.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ *
+ * @return Source frequency in kHz.
+ */
+NvRmFreqKHz
+NvRmPrivModuleGetMaxSrcKHz(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo);
+
+/**
+ * Similar to the RM public module reset API, but has the option of either
+ * pulsing or keeping the reset line active.
+ *
+ * @param hold If NV_TRUE, keep asserting the reset. If NV_FALSE,
+ * pulse a reset to the hardware module.
+ *
+ */
+void
+NvRmPrivModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold);
+
+/**
+ * Updates voltage scaling references, when the specified module clock
+ * is enabled, or disabled.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ * @param pCstate Pointer to the targeted module clock state.
+ * @param Enable NV_TRUE if module clock is about to be enabled;
+ * NV_FALSE if module clock has just been disabled.
+ *
+ * @return Core voltage level in mV required for the new module configuration.
+ * NvRmVoltsUnspecified is returned if module clock can be enabled without
+ * changing voltage requirements. NvRmVoltsOff is returned when module clock
+ * is disabled.
+ */
+NvRmMilliVolts
+NvRmPrivModuleVscaleAttach(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate,
+ NvBool Enable);
+
+/**
+ * Updates voltage scaling references, when the clock frequency for the
+ * specified module is re-configured.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ * @param pCstate Pointer to the targeted module clock state.
+ * @param TargetModuleKHz Target module frequency in kHz.
+ * @param TargetSrcKHz Clock source frequency for the target module in kHz.
+ *
+ * @return Core voltage level in mV required for new module configuration.
+ * NvRmVoltsUnspecified is returned if all specified frequencies can be
+ * configured without changing voltage requirements.
+ */
+NvRmMilliVolts
+NvRmPrivModuleVscaleReAttach(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate,
+ NvRmFreqKHz TargetModuleKHz,
+ NvRmFreqKHz TargetSrcKHz);
+
+/**
+ * Sets voltage scaling attribute for the specified module clock.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ * @param pCstate Pointer to the targeted module clock state, which is updated
+ * by this function.
+ *
+ * @note The scaling attribute in the clock state structure is set NV_FALSE for
+ * all core clocks (CPU, AVP, system buses, memory). For modules designated
+ * clocks it is set NV_FALSE if any frequency within module clock limits can
+ * be selected at any core voltage level within SoC operational range.
+ * Otherwise, the attribute is set NV_TRUE.
+ */
+void
+NvRmPrivModuleSetScalingAttribute(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate);
+
+/**
+ * Sets "as is" module clock configuration as specified by the given
+ * clock state structure.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Pointer to the targeted module clock descriptor.
+ * @param pCstate Pointer to the targeted module clock state to be set
+ * by this function.
+ */
+void
+NvRmPrivModuleClockSet(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ const NvRmModuleClockState* pCstate);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_CLOCKS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits.c
new file mode 100644
index 000000000000..c6be024d198b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits.c
@@ -0,0 +1,939 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_clocks.h"
+#include "nvassert.h"
+#include "nvrm_drf.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_boot.h"
+#include "nvbootargs.h"
+#include "nvrm_memmgr.h"
+#include "ap15/ap15rm_private.h"
+#include "ap15/project_relocation_table.h"
+
+
+// Maps a shmoo voltage-step index to its core voltage in mV, taken from the
+// SoC shmoo voltage ladder. NOTE: hRmDevice is not evaluated by the macro;
+// it is kept in the parameter list for call-site uniformity.
+#define NvRmPrivGetStepMV(hRmDevice, step) \
+ (s_ChipFlavor.pSocShmoo->ShmooVoltages[(step)])
+
+// Extended clock limits IDs
+// Extends the module ID space with pseudo-IDs used only as indices into the
+// clock limits table below (per-head display pixel clocks and CAR sources).
+typedef enum
+{
+ // Last Module ID
+ NvRmClkLimitsExtID_LastModuleID = NvRmPrivModuleID_Num,
+
+ // Extended ID for display A pixel clock limits
+ NvRmClkLimitsExtID_DisplayA,
+
+ // Extended ID for display B pixel clock limits
+ NvRmClkLimitsExtID_DisplayB,
+
+ // Extended ID for CAR clock sources limits
+ NvRmClkLimitsExtID_ClkSrc,
+
+ NvRmClkLimitsExtID_Num,
+ NvRmClkLimitsExtID_Force32 = 0x7FFFFFFF,
+} NvRmClkLimitsExtID;
+
+/*
+ * Module clocks frequency limits table ordered by s/w module ids.
+ * Display is a special case and has 3 entries associated:
+ * - one entry that corresponds to display ID specifies pixel clock limit used
+ * for CAR clock sources configuration; it is retrieved by RM clock manager
+ * via private interface (same limit for both CAR display clock selectors);
+ * - two entries appended at the end of the table specify pixel clock limits
+ * for two display heads used for DDK clock configuration, these limits will
+ * be retrieved by DDK via public interface
+ * Also appended at the end of the table limits for clock sources (PLLs) forced
+ * by CAR clock dividers
+ */
+static NvRmModuleClockLimits s_ClockRangeLimits[NvRmClkLimitsExtID_Num];
+
+// Translation table for module clock limits scaled with voltage
+// (NULL entry => no voltage scaling data for that module)
+static const NvRmFreqKHz* s_pClockScales[NvRmClkLimitsExtID_Num];
+
+// Reference counts of clocks that require the respective core voltage to run
+static NvU32 s_VoltageStepRefCounts[NVRM_VOLTAGE_STEPS];
+
+// Chip shmoo data records
+static NvRmChipFlavor s_ChipFlavor;
+static NvRmSocShmoo s_SocShmoo;
+static NvRmCpuShmoo s_CpuShmoo;
+static void* s_pShmooData = NULL;
+
+// Retrieves shmoo data passed from the boot loader via boot arguments
+// (defined later in this file).
+static NvError
+NvRmBootArgChipShmooGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmChipFlavor* pChipFlavor);
+
+// Populates the static s_ChipFlavor record with chip characterization
+// (shmoo) data. Tries the built-in shmoo database first; if that fails,
+// falls back to shmoo data passed in via boot arguments. Asserts if
+// neither source is available, since clock limits cannot be established.
+static void NvRmPrivChipFlavorInit(NvRmDeviceHandle hRmDevice)
+{
+ NvOsMemset((void*)&s_ChipFlavor, 0, sizeof(s_ChipFlavor));
+
+ if (NvRmPrivChipShmooDataInit(hRmDevice, &s_ChipFlavor) == NvSuccess)
+ {
+ NvOsDebugPrintf("NVRM Initialized shmoo database\n");
+ return;
+ }
+ if (NvRmBootArgChipShmooGet(hRmDevice, &s_ChipFlavor) == NvSuccess)
+ {
+ NvOsDebugPrintf("NVRM Got shmoo boot argument (at 0x%x)\n",
+ ((NvUPtr)s_pShmooData));
+ return;
+ }
+ NV_ASSERT(!"Failed to set clock limits");
+}
+
+/**
+ * Builds the static module clock limits table (s_ClockRangeLimits) and the
+ * voltage-scaling frequency lists (s_pClockScales) by combining per-chip
+ * shmoo (characterization) data with SKU-based absolute limits.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Pointer to the populated clock limits table, indexed by module ID
+ * (with the extended pseudo-IDs appended at the end).
+ */
+const NvRmModuleClockLimits*
+NvRmPrivClockLimitsInit(NvRmDeviceHandle hRmDevice)
+{
+ NvU32 i;
+ NvRmFreqKHz CpuMaxKHz, AvpMaxKHz, VdeMaxKHz, TDMaxKHz, DispMaxKHz;
+ const NvRmSKUedLimits* pSKUedLimits;
+ const NvRmScaledClkLimits* pHwLimits;
+ const NvRmSocShmoo* pShmoo;
+
+ NV_ASSERT(hRmDevice);
+ NvRmPrivChipFlavorInit(hRmDevice);
+ pShmoo = s_ChipFlavor.pSocShmoo;
+ pHwLimits = &pShmoo->ScaledLimitsList[0];
+ pSKUedLimits = pShmoo->pSKUedLimits;
+
+ NvOsMemset((void*)s_pClockScales, 0, sizeof(s_pClockScales));
+ NvOsMemset(s_ClockRangeLimits, 0, sizeof(s_ClockRangeLimits));
+ NvOsMemset(s_VoltageStepRefCounts, 0, sizeof(s_VoltageStepRefCounts));
+ s_VoltageStepRefCounts[0] = NvRmPrivModuleID_Num; // all at minimum step
+
+ // Combine AVP/System clock absolute limit with scaling V/F ladder upper
+ // boundary, and set default clock range for all present modules the same
+ // as for AVP/System clock
+ AvpMaxKHz = pSKUedLimits->AvpMaxKHz;
+ for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
+ {
+ if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
+ {
+ AvpMaxKHz = NV_MIN(
+ AvpMaxKHz, pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex]);
+ break;
+ }
+ }
+
+ for (i = 0; i < NvRmPrivModuleID_Num; i++)
+ {
+ NvRmModuleInstance *inst;
+ if (NvRmPrivGetModuleInstance(hRmDevice, i, &inst) == NvSuccess)
+ {
+ s_ClockRangeLimits[i].MaxKHz = AvpMaxKHz;
+ s_ClockRangeLimits[i].MinKHz = NVRM_BUS_MIN_KHZ;
+
+ }
+ }
+
+ // Fill in limits for modules with slectable clock sources and/or dividers
+ // as specified by the h/w table according to the h/w device ID
+ // (CPU and AVP are not in relocation table - need translate id explicitly)
+ // TODO: need separate subclock limits? (current implementation applies
+ // main clock limits to all subclocks)
+ for (i = 0; i < pShmoo->ScaledLimitsListSize; i++)
+ {
+ NvRmModuleID id;
+ if (pHwLimits[i].HwDeviceId == NV_DEVID_CPU)
+ id = NvRmModuleID_Cpu;
+ else if (pHwLimits[i].HwDeviceId == NV_DEVID_AVP)
+ id = NvRmModuleID_Avp;
+ else if (pHwLimits[i].HwDeviceId == NVRM_DEVID_CLK_SRC)
+ id = NvRmClkLimitsExtID_ClkSrc;
+ else
+ id = NvRmPrivDevToModuleID(pHwLimits[i].HwDeviceId);
+ if ((id != NVRM_DEVICE_UNKNOWN) &&
+ (pHwLimits[i].SubClockId == 0))
+ {
+ s_ClockRangeLimits[id].MinKHz = pHwLimits[i].MinKHz;
+ s_ClockRangeLimits[id].MaxKHz =
+ pHwLimits[i].MaxKHzList[pShmoo->ShmooVmaxIndex];
+ s_pClockScales[id] = pHwLimits[i].MaxKHzList;
+ }
+ }
+ // Fill in CPU scaling data if SoC has dedicated CPU rail, and CPU clock
+ // characterization data is separated from other modules on common core rail
+ if (s_ChipFlavor.pCpuShmoo)
+ {
+ const NvRmScaledClkLimits* pCpuLimits =
+ s_ChipFlavor.pCpuShmoo->pScaledCpuLimits;
+ NV_ASSERT(pCpuLimits && (pCpuLimits->HwDeviceId == NV_DEVID_CPU));
+
+ s_ClockRangeLimits[NvRmModuleID_Cpu].MinKHz = pCpuLimits->MinKHz;
+ s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz =
+ pCpuLimits->MaxKHzList[s_ChipFlavor.pCpuShmoo->ShmooVmaxIndex];
+ s_pClockScales[NvRmModuleID_Cpu] = pCpuLimits->MaxKHzList;
+ }
+
+ // Set AVP upper clock boundary with combined Absolute/Scaled limit;
+ // Sync System clock with AVP (System is not in relocation table)
+ s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz = AvpMaxKHz;
+ s_ClockRangeLimits[NvRmPrivModuleID_System].MaxKHz =
+ s_ClockRangeLimits[NvRmModuleID_Avp].MaxKHz;
+ s_ClockRangeLimits[NvRmPrivModuleID_System].MinKHz =
+ s_ClockRangeLimits[NvRmModuleID_Avp].MinKHz;
+ s_pClockScales[NvRmPrivModuleID_System] = s_pClockScales[NvRmModuleID_Avp];
+
+ // Set VDE upper clock boundary with combined Absolute/Scaled limit (on
+ // AP15/Ap16 VDE clock derived from the system bus, and VDE maximum limit
+ // must be the same as AVP/System).
+ VdeMaxKHz = pSKUedLimits->VdeMaxKHz;
+ VdeMaxKHz = NV_MIN(
+ VdeMaxKHz, s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz);
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ {
+ NV_ASSERT(VdeMaxKHz == AvpMaxKHz);
+ }
+ s_ClockRangeLimits[NvRmModuleID_Vde].MaxKHz = VdeMaxKHz;
+
+ // Set upper clock boundaries for devices on CPU bus (CPU, Mselect,
+ // CMC) with combined Absolute/Scaled limits
+ CpuMaxKHz = pSKUedLimits->CpuMaxKHz;
+ CpuMaxKHz = NV_MIN(
+ CpuMaxKHz, s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz);
+ s_ClockRangeLimits[NvRmModuleID_Cpu].MaxKHz = CpuMaxKHz;
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ {
+ s_ClockRangeLimits[NvRmModuleID_CacheMemCtrl].MaxKHz = CpuMaxKHz;
+ s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz;
+ NV_ASSERT(s_ClockRangeLimits[NvRmClkLimitsExtID_ClkSrc].MaxKHz >=
+ CpuMaxKHz);
+ }
+ else if (hRmDevice->ChipId.Id == 0x20)
+ {
+ // No CMC; TODO: Mselect/CPU <= 1/4?
+ s_ClockRangeLimits[NvRmPrivModuleID_Mselect].MaxKHz = CpuMaxKHz >> 2;
+ }
+ else
+ {
+ NV_ASSERT(!"Unsupported chip ID");
+ }
+
+ // Fill in memory controllers absolute range (scaled data is on ODM level)
+ s_ClockRangeLimits[NvRmPrivModuleID_MemoryController].MaxKHz =
+ pSKUedLimits->McMaxKHz;
+ s_ClockRangeLimits[NvRmPrivModuleID_ExternalMemoryController].MaxKHz =
+ pSKUedLimits->Emc2xMaxKHz;
+ s_ClockRangeLimits[NvRmPrivModuleID_ExternalMemoryController].MinKHz =
+ NVRM_SDRAM_MIN_KHZ * 2;
+ s_ClockRangeLimits[NvRmPrivModuleID_ExternalMemory].MaxKHz =
+ pSKUedLimits->Emc2xMaxKHz / 2;
+ s_ClockRangeLimits[NvRmPrivModuleID_ExternalMemory].MinKHz =
+ NVRM_SDRAM_MIN_KHZ;
+
+ // Set 3D upper clock boundary with combined Absolute/Scaled limit.
+ TDMaxKHz = pSKUedLimits->TDMaxKHz;
+ TDMaxKHz = NV_MIN(
+ TDMaxKHz, s_ClockRangeLimits[NvRmModuleID_3D].MaxKHz);
+ s_ClockRangeLimits[NvRmModuleID_3D].MaxKHz = TDMaxKHz;
+
+ // Set Display upper clock boundary with combined Absolute/Scaled limit.
+ // (fill in clock limits for both display heads)
+ DispMaxKHz = NV_MAX(pSKUedLimits->DisplayAPixelMaxKHz,
+ pSKUedLimits->DisplayBPixelMaxKHz);
+ DispMaxKHz = NV_MIN(
+ DispMaxKHz, s_ClockRangeLimits[NvRmModuleID_Display].MaxKHz);
+ s_ClockRangeLimits[NvRmModuleID_Display].MaxKHz = DispMaxKHz;
+ s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayA].MaxKHz =
+ NV_MIN(DispMaxKHz, pSKUedLimits->DisplayAPixelMaxKHz);
+ s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayA].MinKHz =
+ s_ClockRangeLimits[NvRmModuleID_Display].MinKHz;
+ s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayB].MaxKHz =
+ NV_MIN(DispMaxKHz, pSKUedLimits->DisplayBPixelMaxKHz);
+ s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayB].MinKHz =
+ s_ClockRangeLimits[NvRmModuleID_Display].MinKHz;
+
+ return s_ClockRangeLimits;
+}
+
+/**
+ * Returns the maximum clock frequency (in kHz) allowed for the given module.
+ *
+ * For all modules except display, the instance field of ModuleId is ignored
+ * and the limit of the CAR-divider-generated module clock is returned. For
+ * display, the pixel clock limit of the respective head (instance 0 or 1)
+ * is returned; any other display instance asserts and returns 0.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param ModuleId The combined module ID and instance.
+ */
+NvRmFreqKHz
+NvRmPowerModuleGetMaxFrequency(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID ModuleId)
+{
+ NvU32 Instance = NVRM_MODULE_ID_INSTANCE(ModuleId);
+ NvRmModuleID Module = NVRM_MODULE_ID_MODULE(ModuleId);
+ NV_ASSERT(Module < NvRmPrivModuleID_Num);
+ NV_ASSERT(hRmDevice);
+
+ // For all modules, except display, ignore instance, and return
+ // max frequency for the clock generated from CAR dividers
+ if (Module != NvRmModuleID_Display)
+ return s_ClockRangeLimits[Module].MaxKHz;
+
+ // For display return pixel clock for the respective head
+ if (Instance == 0)
+ return s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayA].MaxKHz;
+ else if (Instance == 1)
+ return s_ClockRangeLimits[NvRmClkLimitsExtID_DisplayB].MaxKHz;
+ else
+ {
+ NV_ASSERT(!"Invalid display instance");
+ return 0;
+ }
+}
+
+// Returns the nominal core voltage in mV, i.e. the top entry of the SoC
+// shmoo voltage ladder. The hRmDevice argument is accepted for interface
+// symmetry but is not used.
+NvRmMilliVolts
+NvRmPrivGetNominalMV(NvRmDeviceHandle hRmDevice)
+{
+ const NvRmSocShmoo* p = s_ChipFlavor.pSocShmoo;
+ return p->ShmooVoltages[p->ShmooVmaxIndex];
+}
+
+// Returns the SVOP (SRAM margin) parameters from the SoC shmoo data:
+// the low-voltage threshold in mV and the SVOP register settings to use
+// below (Lv) and at/above (Hv) that threshold. All output pointers must
+// be non-NULL.
+void
+NvRmPrivGetSvopParameters(
+ NvRmDeviceHandle hRmDevice,
+ NvRmMilliVolts* pSvopLowMv,
+ NvU32* pSvopLvSetting,
+ NvU32* pSvopHvSetting)
+{
+ const NvRmSocShmoo* p = s_ChipFlavor.pSocShmoo;
+
+ NV_ASSERT(pSvopLowMv && pSvopLvSetting && pSvopHvSetting);
+ *pSvopLowMv = p->SvopLowVoltage;
+ *pSvopLvSetting = p->SvopLowSetting;
+ *pSvopHvSetting = p->SvopHighSetting;
+}
+
+// Returns the minimum core voltage (mV) required to run CAR clock sources
+// at the given frequency: finds the lowest voltage step whose scaled source
+// frequency limit is at or above FreqKHz (the maximum step is used if the
+// frequency exceeds all scaled limits).
+NvRmMilliVolts
+NvRmPrivSourceVscaleGetMV(NvRmDeviceHandle hRmDevice, NvRmFreqKHz FreqKHz)
+{
+ NvU32 i;
+ const NvU32* pScaleSrc = s_pClockScales[NvRmClkLimitsExtID_ClkSrc];
+
+ for (i = 0; i < s_ChipFlavor.pSocShmoo->ShmooVmaxIndex; i++)
+ {
+ if (FreqKHz <= pScaleSrc[i])
+ break;
+ }
+ return NvRmPrivGetStepMV(hRmDevice, i);
+}
+
+// Returns the core voltage (mV) currently required by enabled module clocks:
+// the highest voltage step with a non-zero reference count. Step 0 (minimum
+// voltage) is returned when no higher step is referenced.
+NvRmMilliVolts
+NvRmPrivModulesGetOperationalMV(NvRmDeviceHandle hRmDevice)
+{
+ NvU32 i;
+ NV_ASSERT(hRmDevice);
+
+ for (i = s_ChipFlavor.pSocShmoo->ShmooVmaxIndex; i != 0; i--)
+ {
+ if (s_VoltageStepRefCounts[i])
+ break;
+ }
+ return NvRmPrivGetStepMV(hRmDevice, i);
+}
+
+// Returns the core voltage (mV) required to run the given module at the
+// given frequency. Uses the module's scaled frequency list to find the
+// matching voltage step; modules without scaling data always map to the
+// minimum step. If the SoC has a dedicated CPU rail, the CPU-specific
+// voltage ladder is used for the CPU module.
+NvRmMilliVolts
+NvRmPrivModuleVscaleGetMV(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmFreqKHz FreqKHz)
+{
+ NvU32 i;
+ const NvRmFreqKHz* pScale;
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(Module < NvRmPrivModuleID_Num);
+
+ // If no scaling for this module - exit
+ pScale = s_pClockScales[Module];
+ if(!pScale)
+ return NvRmPrivGetStepMV(hRmDevice, 0);
+
+ // Find voltage step for the requested frequency, and convert it to MV
+ // Use CPU specific voltage ladder if SoC has dedicated CPU rail
+ if (s_ChipFlavor.pCpuShmoo && (Module == NvRmModuleID_Cpu))
+ {
+ for (i = 0; i < s_ChipFlavor.pCpuShmoo->ShmooVmaxIndex; i++)
+ {
+ if (FreqKHz <= pScale[i])
+ break;
+ }
+ return s_ChipFlavor.pCpuShmoo->ShmooVoltages[i];
+ }
+ // Use common ladder for all other modules or CPU on core rail
+ for (i = 0; i < s_ChipFlavor.pSocShmoo->ShmooVmaxIndex; i++)
+ {
+ if (FreqKHz <= pScale[i])
+ break;
+ }
+ return NvRmPrivGetStepMV(hRmDevice, i);
+}
+
+// Returns the module's voltage-scaled maximum frequency list (one entry per
+// voltage step) and stores the list length in *pListSize. The CPU-specific
+// ladder length is used for the CPU module on a dedicated CPU rail. May
+// return NULL if the module has no scaling data (caller must check).
+const NvRmFreqKHz*
+NvRmPrivModuleVscaleGetMaxKHzList(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvU32* pListSize)
+{
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pListSize && (Module < NvRmPrivModuleID_Num));
+
+ // Use CPU specific voltage ladder if SoC has dedicated CPU rail
+ if (s_ChipFlavor.pCpuShmoo && (Module == NvRmModuleID_Cpu))
+ *pListSize = s_ChipFlavor.pCpuShmoo->ShmooVmaxIndex + 1;
+ else
+ *pListSize = s_ChipFlavor.pSocShmoo->ShmooVmaxIndex + 1;
+
+ return s_pClockScales[Module];
+}
+
+/**
+ * Updates voltage-scaling reference counts when the given module clock is
+ * enabled or disabled (see nvrm_clocks.h for the full contract).
+ *
+ * Implementation notes:
+ * - If the module's clock enable bit is already set in the CAR register,
+ *   the state is unchanged and NvRmVoltsUnspecified is returned.
+ * - On AP16 (chip id 0x16) both USB modules share one clock enable bit,
+ *   so the reference count is adjusted twice for Usb2Otg.
+ * - SPDIF, VI and TVO have a subclock whose state record is located
+ *   immediately after the main one; its voltage step is accounted as well.
+ */
+NvRmMilliVolts
+NvRmPrivModuleVscaleAttach(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate,
+ NvBool Enable)
+{
+ NvBool Enabled;
+ NvU32 reg, vstep1, vstep2;
+ NvRmMilliVolts VoltageRequirement = NvRmVoltsUnspecified;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo && pCstate);
+
+ // If no scaling for this module - exit
+ if (!pCstate->Vscale)
+ return VoltageRequirement;
+
+ //Check changes in clock status - exit if none (if clock is already
+ // enabled || if clock still enabled => if enabled)
+ NV_ASSERT(pCinfo->ClkEnableOffset);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkEnableOffset);
+ Enabled = ((reg & pCinfo->ClkEnableField) == pCinfo->ClkEnableField);
+ if (Enabled)
+ return VoltageRequirement;
+
+ // Update ref counts for module clock and subclock if any
+ // (subclock state are located immediately after main one)
+ vstep1 = pCstate->Vstep;
+ if (Enable)
+ {
+ s_VoltageStepRefCounts[vstep1]++;
+ if ((pCinfo->Module == NvRmModuleID_Usb2Otg) &&
+ (hRmDevice->ChipId.Id == 0x16))
+ {
+ // Two AP16 USB modules share clock enable control
+ s_VoltageStepRefCounts[vstep1]++;
+ }
+ }
+ else
+ {
+ NV_ASSERT(s_VoltageStepRefCounts[vstep1]);
+ s_VoltageStepRefCounts[vstep1]--;
+ if ((pCinfo->Module == NvRmModuleID_Usb2Otg) &&
+ (hRmDevice->ChipId.Id == 0x16))
+ {
+ // Two AP16 USB modules share clock enable control
+ NV_ASSERT(s_VoltageStepRefCounts[vstep1]);
+ s_VoltageStepRefCounts[vstep1]--;
+ }
+ }
+ if ((pCinfo->Module == NvRmModuleID_Spdif) ||
+ (pCinfo->Module == NvRmModuleID_Vi) ||
+ (pCinfo->Module == NvRmModuleID_Tvo))
+ {
+ vstep2 = pCstate[1].Vstep;
+ if (Enable)
+ {
+ s_VoltageStepRefCounts[vstep2]++;
+ vstep1 = NV_MAX(vstep1, vstep2);
+ }
+ else
+ {
+ NV_ASSERT(s_VoltageStepRefCounts[vstep2]);
+ s_VoltageStepRefCounts[vstep2]--;
+ }
+ }
+
+ // Set new voltage requirements if module is to be enabled;
+ // voltage can be turned Off if module was disabled.
+ if (Enable)
+ VoltageRequirement = NvRmPrivGetStepMV(hRmDevice, vstep1);
+ else
+ VoltageRequirement = NvRmVoltsOff;
+ return VoltageRequirement;
+}
+
+
+/**
+ * Updates voltage-scaling reference counts when a module's clock frequency
+ * is reconfigured (see nvrm_clocks.h for the full contract).
+ *
+ * Implementation notes:
+ * - The target module frequency is clipped to the module's clock limits
+ *   before the voltage step lookup.
+ * - The resulting step is the maximum of the step required by the module
+ *   frequency and the step required by the clock source frequency.
+ * - pCstate->Vstep is always updated when the step changes; the reference
+ *   counts (and the returned voltage) are only updated when the module
+ *   clock is currently enabled in the CAR register.
+ */
+NvRmMilliVolts
+NvRmPrivModuleVscaleReAttach(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmModuleClockInfo* pCinfo,
+ NvRmModuleClockState* pCstate,
+ NvRmFreqKHz TargetModuleKHz,
+ NvRmFreqKHz TargetSrcKHz)
+{
+ NvU32 i, j, reg;
+ const NvRmFreqKHz* pScale;
+ NvRmFreqKHz FreqKHz;
+ NvRmMilliVolts VoltageRequirement = NvRmVoltsUnspecified;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(pCinfo && pCstate);
+
+ // No scaling for this module - exit
+ if (!pCstate->Vscale)
+ return VoltageRequirement;
+
+ // Clip target frequency to module clock limits and find voltage step for
+ // running at target frequency
+ FreqKHz = s_ClockRangeLimits[pCinfo->Module].MinKHz;
+ FreqKHz = NV_MAX(FreqKHz, TargetModuleKHz);
+ if (FreqKHz > s_ClockRangeLimits[pCinfo->Module].MaxKHz)
+ FreqKHz = s_ClockRangeLimits[pCinfo->Module].MaxKHz;
+
+ pScale = s_pClockScales[pCinfo->Module];
+ NV_ASSERT(pScale);
+ for (i = 0; i < s_ChipFlavor.pSocShmoo->ShmooVmaxIndex; i++)
+ {
+ if (FreqKHz <= pScale[i])
+ break;
+ }
+
+ // Find voltage step for using the target source, and select maximum
+ // step required for both module and its source to operate
+ pScale = s_pClockScales[NvRmClkLimitsExtID_ClkSrc];
+ NV_ASSERT(pScale);
+ for (j = 0; j < s_ChipFlavor.pSocShmoo->ShmooVmaxIndex; j++)
+ {
+ if (TargetSrcKHz <= pScale[j])
+ break;
+ }
+ i = NV_MAX(i, j);
+
+ // If voltage step has changed, always update module state, and update
+ // ref count provided module clock is enabled
+ if (pCstate->Vstep != i)
+ {
+ NV_ASSERT(pCinfo->ClkEnableOffset);
+ reg = NV_REGR(hRmDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ pCinfo->ClkEnableOffset);
+ if ((reg & pCinfo->ClkEnableField) == pCinfo->ClkEnableField)
+ {
+ NV_ASSERT(s_VoltageStepRefCounts[pCstate->Vstep]);
+ s_VoltageStepRefCounts[pCstate->Vstep]--;
+ s_VoltageStepRefCounts[i]++;
+ VoltageRequirement = NvRmPrivGetStepMV(hRmDevice, i);
+ }
+ pCstate->Vstep = i;
+ }
+ return VoltageRequirement;
+}
+
+/**
+ * Decides whether voltage scaling applies to the given module clock and
+ * records the result in pCstate->Vscale. Modules sourced from core bus
+ * clocks, modules scaled elsewhere (memory controllers, PMU transport), and
+ * modules that run at max frequency on the lowest voltage step are excluded.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pCinfo Module clock descriptor (sources and module ID).
+ * @param pCstate Module clock state; Vscale is set to NV_TRUE/NV_FALSE.
+ */
+void
+NvRmPrivModuleSetScalingAttribute(
+    NvRmDeviceHandle hRmDevice,
+    const NvRmModuleClockInfo* pCinfo,
+    NvRmModuleClockState* pCstate)
+{
+    const NvRmFreqKHz* pScale;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pCinfo && pCstate);
+
+    // Voltage scaling for free running core clocks is done by DFS
+    // independently from module clock control. Therefore modules
+    // that have core clock as a source do not have their own v-scale
+    // attribute set
+    switch (pCinfo->Sources[0])
+    {
+        case NvRmClockSource_CpuBus:
+        case NvRmClockSource_SystemBus:
+        case NvRmClockSource_Ahb:
+        case NvRmClockSource_Apb:
+        case NvRmClockSource_Vbus:
+            pCstate->Vscale = NV_FALSE;
+            return;
+        default:
+            break;
+    }
+
+    // Memory controller scale is specified separately on ODM layer, as
+    // it is board dependent; PMU transport must work at any voltage - no
+    // v-scale attribute for these modules
+    switch (pCinfo->Module)
+    {
+        case NvRmModuleID_Dvc: // TODO: check PMU transport with ODM DB
+        case NvRmPrivModuleID_MemoryController:
+        case NvRmPrivModuleID_ExternalMemoryController:
+            pCstate->Vscale = NV_FALSE;
+            return;
+        default:
+            break;
+    }
+
+    // Check if this module can run at maximum frequency at all
+    // voltages - no v-scale for this module as well
+    pScale = s_pClockScales[pCinfo->Module];
+    if(!pScale)
+    {
+        NV_ASSERT(!"Need scaling information");
+        pCstate->Vscale = NV_FALSE;
+        return;
+    }
+    if (pScale[0] == pScale[s_ChipFlavor.pSocShmoo->ShmooVmaxIndex])
+    {
+        // Module limit is flat across steps; also require that its fastest
+        // source can run at the minimum voltage step before dropping v-scale
+        NvRmMilliVolts SrcMaxMv = NvRmPrivSourceVscaleGetMV(
+            hRmDevice, NvRmPrivModuleGetMaxSrcKHz(hRmDevice, pCinfo));
+        if (SrcMaxMv == NvRmPrivGetStepMV(hRmDevice, 0))
+        {
+            pCstate->Vscale = NV_FALSE;
+            return;
+        }
+    }
+    // Other modules have v-scale
+    pCstate->Vscale = NV_TRUE;
+}
+
+/**
+ * Returns the 32-bit ODM EMC DQSIB offset recorded in the SoC shmoo data.
+ * The device handle is part of the private API signature but is not used.
+ */
+NvU32
+NvRmPrivGetEmcDqsibOffset(NvRmDeviceHandle hRmDevice)
+{
+    return s_ChipFlavor.pSocShmoo->DqsibOffset;
+}
+
+/**
+ * Looks up the oscillator doubler delay-tap setting for the given main
+ * clock frequency at the current SoC process corner.
+ *
+ * @param hRmDevice The RM device handle (unused by the lookup itself).
+ * @param OscKHz Oscillator (main) frequency in kHz.
+ * @param pTaps Output: number of delay taps for the matching entry.
+ *
+ * @return NvSuccess when the frequency is found in the doubler
+ *  configuration list, NvError_NotSupported otherwise.
+ */
+NvError
+NvRmPrivGetOscDoublerTaps(
+    NvRmDeviceHandle hRmDevice,
+    NvRmFreqKHz OscKHz,
+    NvU32* pTaps)
+{
+    const NvRmOscDoublerConfig* pCfg =
+        s_ChipFlavor.pSocShmoo->OscDoublerCfgList;
+    const NvRmOscDoublerConfig* pEnd =
+        pCfg + s_ChipFlavor.pSocShmoo->OscDoublerCfgListSize;
+
+    // Scan the configuration list for the requested oscillator frequency,
+    // and report the tap count for the SoC corner
+    for (; pCfg < pEnd; pCfg++)
+    {
+        if (pCfg->OscKHz == OscKHz)
+        {
+            *pTaps = pCfg->Taps[s_ChipFlavor.corner];
+            return NvSuccess;
+        }
+    }
+    return NvError_NotSupported; // Not supported oscillator frequency
+}
+
+/**
+ * Reports whether the SoC has a dedicated CPU voltage rail: dedicated CPU
+ * shmoo data is present only when the CPU is not on the common core rail.
+ */
+NvBool NvRmPrivIsCpuRailDedicated(NvRmDeviceHandle hRmDevice)
+{
+    return (s_ChipFlavor.pCpuShmoo != NULL);
+}
+
+/*****************************************************************************/
+
+// TODO: clock limits deinit in NvRmClose() - free s_pShmooData
+// TODO: remove after RM partition is completed
+#define NVRM_BOOT_USE_BOOTARG_SHMOO (1)
+
+/**
+ * Retrieves packed SoC shmoo data passed by the bootloader via boot args,
+ * copies it into OS memory (s_pShmooData), releases the carveout buffer,
+ * and fills in the chip "flavor" record with pointers into the copy.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pChipFlavor Output: SKU/corner fields plus core (and optional
+ *  dedicated CPU) shmoo record pointers.
+ *
+ * @return NvSuccess on success; NvError_BadParameter when no boot arg is
+ *  present, or a propagated memory-manager error. On failure all partially
+ *  acquired resources are released and s_pShmooData is reset to NULL.
+ */
+static NvError NvRmBootArgChipShmooGet(
+    NvRmDeviceHandle hRmDevice,
+    NvRmChipFlavor* pChipFlavor)
+{
+#if NVRM_BOOT_USE_BOOTARG_SHMOO
+
+    NvU32 offset, size, TotalSize = 0;
+    NvBootArgsChipShmoo BootArgSh;
+    void* pBootShmooData = NULL;
+    NvRmMemHandle hMem = NULL;
+    NvError err = NvSuccess;
+
+    // Retrieve shmoo data
+    err = NvOsBootArgGet(NvBootArgKey_ChipShmoo, &BootArgSh, sizeof(BootArgSh));
+    if ((err != NvSuccess) || (BootArgSh.MemHandleKey == 0))
+    {
+        err = NvError_BadParameter;
+        goto fail;
+    }
+    err = NvRmMemHandleClaimPreservedHandle(
+        hRmDevice, BootArgSh.MemHandleKey, &hMem);
+    if (err != NvSuccess)
+    {
+        goto fail;
+    }
+
+    TotalSize = NvRmMemGetSize(hMem);
+    NV_ASSERT(TotalSize);
+    err = NvRmMemMap(hMem, 0, TotalSize, NVOS_MEM_READ, &pBootShmooData);
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    // Use OS memory to keep shmoo data, and release carveout buffer
+    s_pShmooData = NvOsAlloc(TotalSize);
+    if (!s_pShmooData)
+    {
+        err = NvError_InsufficientMemory;
+        goto fail;
+    }
+    NvOsMemcpy(s_pShmooData, pBootShmooData, TotalSize);
+    NvRmMemUnmap(hMem, pBootShmooData, TotalSize);
+    NvRmMemHandleFree(hMem);
+
+    // Fill in shmoo data records
+    pChipFlavor->sku = hRmDevice->ChipId.SKU;
+    pChipFlavor->corner = BootArgSh.CoreCorner;
+    pChipFlavor->CpuCorner = BootArgSh.CpuCorner;
+
+    // Shmoo data for core domain
+    pChipFlavor->pSocShmoo = &s_SocShmoo;
+
+    offset = BootArgSh.CoreShmooVoltagesListOffset;
+    size = BootArgSh.CoreShmooVoltagesListSize;
+    NV_ASSERT (offset + size <= TotalSize);
+    s_SocShmoo.ShmooVoltages = (const NvU32*)((NvUPtr)s_pShmooData + offset);
+    size /= sizeof(*s_SocShmoo.ShmooVoltages);
+    // List size must be a whole number of entries, with at least two steps
+    NV_ASSERT((size * sizeof(*s_SocShmoo.ShmooVoltages) ==
+               BootArgSh.CoreShmooVoltagesListSize) && (size > 1));
+    s_SocShmoo.ShmooVmaxIndex = size - 1;
+
+    offset = BootArgSh.CoreScaledLimitsListOffset;
+    size = BootArgSh.CoreScaledLimitsListSize;
+    NV_ASSERT (offset + size <= TotalSize);
+    s_SocShmoo.ScaledLimitsList =
+        (const NvRmScaledClkLimits*) ((NvUPtr)s_pShmooData + offset);
+    size /= sizeof(*s_SocShmoo.ScaledLimitsList);
+    NV_ASSERT((size * sizeof(*s_SocShmoo.ScaledLimitsList) ==
+               BootArgSh.CoreScaledLimitsListSize) && size);
+    s_SocShmoo.ScaledLimitsListSize = size;
+
+    offset = BootArgSh.OscDoublerListOffset;
+    size = BootArgSh.OscDoublerListSize;
+    NV_ASSERT (offset + size <= TotalSize);
+    s_SocShmoo.OscDoublerCfgList =
+        (const NvRmOscDoublerConfig*)((NvUPtr)s_pShmooData + offset);
+    size /= sizeof(*s_SocShmoo.OscDoublerCfgList);
+    NV_ASSERT((size * sizeof(*s_SocShmoo.OscDoublerCfgList) ==
+               BootArgSh.OscDoublerListSize) && size);
+    s_SocShmoo.OscDoublerCfgListSize = size;
+
+    offset = BootArgSh.SKUedLimitsOffset;
+    size = BootArgSh.SKUedLimitsSize;
+    NV_ASSERT (offset + size <= TotalSize);
+    s_SocShmoo.pSKUedLimits =
+        (const NvRmSKUedLimits*)((NvUPtr)s_pShmooData + offset);
+    NV_ASSERT(size == sizeof(*s_SocShmoo.pSKUedLimits));
+
+    s_SocShmoo.DqsibOffset = BootArgSh.Dqsib;
+    s_SocShmoo.SvopHighSetting = BootArgSh.SvopHighSetting;
+    s_SocShmoo.SvopLowSetting = BootArgSh.SvopLowSetting;
+    s_SocShmoo.SvopLowVoltage = BootArgSh.SvopLowVoltage;
+
+    if (BootArgSh.CpuShmooVoltagesListSize && BootArgSh.CpuScaledLimitsSize)
+    {
+        // Shmoo data for dedicated CPU domain
+        pChipFlavor->pCpuShmoo = &s_CpuShmoo;
+
+        offset = BootArgSh.CpuShmooVoltagesListOffset;
+        size = BootArgSh.CpuShmooVoltagesListSize;
+        NV_ASSERT (offset + size <= TotalSize);
+        s_CpuShmoo.ShmooVoltages =(const NvU32*)((NvUPtr)s_pShmooData + offset);
+        size /= sizeof(*s_CpuShmoo.ShmooVoltages);
+        NV_ASSERT((size * sizeof(*s_CpuShmoo.ShmooVoltages) ==
+                   BootArgSh.CpuShmooVoltagesListSize) && (size > 1));
+        s_CpuShmoo.ShmooVmaxIndex = size - 1;
+
+        offset = BootArgSh.CpuScaledLimitsOffset;
+        size = BootArgSh.CpuScaledLimitsSize;
+        NV_ASSERT (offset + size <= TotalSize);
+        s_CpuShmoo.pScaledCpuLimits =
+            (const NvRmScaledClkLimits*)((NvUPtr)s_pShmooData + offset);
+        NV_ASSERT(size == sizeof(*s_CpuShmoo.pScaledCpuLimits));
+    }
+    else
+    {
+        pChipFlavor->pCpuShmoo = NULL;
+    }
+    return err;
+
+fail:
+    // NOTE(review): on early failure hMem/pBootShmooData may still be NULL;
+    // assumes NvRmMemUnmap/NvRmMemHandleFree tolerate NULL - confirm.
+    NvRmMemUnmap(hMem, pBootShmooData, TotalSize);
+    NvRmMemHandleFree(hMem);
+    NvOsFree(s_pShmooData);
+    s_pShmooData = NULL;
+    return err;
+#else
+    s_pShmooData = NULL;
+    s_SocShmoo.ShmooVoltages = NULL;
+    // Fixed: statement was terminated with ':' instead of ';', breaking the
+    // build whenever NVRM_BOOT_USE_BOOTARG_SHMOO is disabled.
+    s_CpuShmoo.ShmooVoltages = NULL;
+    return NvError_NotSupported;
+#endif
+}
+
+/**
+ * Packs the current chip shmoo data (core domain, plus dedicated CPU domain
+ * when present) into a preserved carveout buffer and records its layout and
+ * memory handle key in the NvBootArgKey_ChipShmoo boot argument, so the
+ * kernel-side RM can reclaim it via NvRmBootArgChipShmooGet().
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return NvSuccess on success, or a propagated memory-manager / boot-arg
+ *  error; on failure the carveout handle is freed.
+ */
+NvError NvRmBootArgChipShmooSet(NvRmDeviceHandle hRmDevice)
+{
+#if NVRM_BOOT_USE_BOOTARG_SHMOO
+
+// Alignment and size to get boot shmoo data into carveout memory
+#define NVRM_BOOT_MEM_ALIGNMENT (0x1 << 12)
+#define NVRM_BOOT_MEM_SIZE (0x1 << 13)
+
+    static const NvRmHeap s_heaps[] =
+    {
+        NvRmHeap_ExternalCarveOut,
+    };
+
+    NvBootArgsChipShmoo BootArgSh;
+    NvRmChipFlavor* pChipFlavor = &s_ChipFlavor;
+    NvRmMemHandle hMem = NULL;
+    void* p = NULL;
+    NvError err = NvSuccess;
+    NvU32 size = 0;
+
+    NV_ASSERT(pChipFlavor->pSocShmoo);
+
+    // Pack shmoo arrays and structures (all members are of NvU32 type).
+    // Start with core domain.
+    BootArgSh.CoreShmooVoltagesListOffset = size;
+    BootArgSh.CoreShmooVoltagesListSize =
+        (pChipFlavor->pSocShmoo->ShmooVmaxIndex + 1) *
+        sizeof(*pChipFlavor->pSocShmoo->ShmooVoltages);
+    size += BootArgSh.CoreShmooVoltagesListSize;
+
+    BootArgSh.CoreScaledLimitsListOffset = size;
+    BootArgSh.CoreScaledLimitsListSize =
+        pChipFlavor->pSocShmoo->ScaledLimitsListSize *
+        sizeof(*pChipFlavor->pSocShmoo->ScaledLimitsList);
+    size += BootArgSh.CoreScaledLimitsListSize;
+
+    BootArgSh.OscDoublerListOffset = size;
+    BootArgSh.OscDoublerListSize =
+        pChipFlavor->pSocShmoo->OscDoublerCfgListSize *
+        sizeof(*pChipFlavor->pSocShmoo->OscDoublerCfgList);
+    size += BootArgSh.OscDoublerListSize;
+
+    BootArgSh.SKUedLimitsOffset = size;
+    BootArgSh.SKUedLimitsSize =
+        sizeof(*pChipFlavor->pSocShmoo->pSKUedLimits);
+    size += BootArgSh.SKUedLimitsSize;
+
+    if (pChipFlavor->pCpuShmoo)
+    {
+        // Add data for dedicated CPU domain
+        BootArgSh.CpuShmooVoltagesListOffset = size;
+        BootArgSh.CpuShmooVoltagesListSize =
+            (pChipFlavor->pCpuShmoo->ShmooVmaxIndex + 1) *
+            sizeof(*pChipFlavor->pCpuShmoo->ShmooVoltages);
+        size += BootArgSh.CpuShmooVoltagesListSize;
+
+        BootArgSh.CpuScaledLimitsOffset = size;
+        BootArgSh.CpuScaledLimitsSize =
+            sizeof(*pChipFlavor->pCpuShmoo->pScaledCpuLimits);
+        size += BootArgSh.CpuScaledLimitsSize;
+    }
+    else
+    {
+        // No dedicated CPU rail: record zero sizes so the reader takes the
+        // pCpuShmoo == NULL path
+        BootArgSh.CpuShmooVoltagesListOffset =
+        BootArgSh.CpuScaledLimitsOffset = size;
+        BootArgSh.CpuShmooVoltagesListSize = 0;
+        BootArgSh.CpuScaledLimitsSize = 0;
+    }
+
+    // Align, allocate, and fill in shmoo packed data buffer
+    size = NV_MAX(size, NVRM_BOOT_MEM_SIZE);
+
+    err = NvRmMemHandleCreate(hRmDevice, &hMem, size);
+    if( err!= NvSuccess )
+    {
+        goto fail;
+    }
+    err = NvRmMemAlloc(hMem, s_heaps, NV_ARRAY_SIZE(s_heaps),
+        NVRM_BOOT_MEM_ALIGNMENT, NvOsMemAttribute_Uncached);
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+    err = NvRmMemMap(hMem, 0, size, 0, &p);
+    if( err != NvSuccess )
+    {
+        goto fail;
+    }
+
+    // Zero the whole buffer, then write each packed section at the offset
+    // recorded in the boot arg above
+    NvOsMemset(p, 0, size);
+    NvRmMemWrite(hMem, BootArgSh.CoreShmooVoltagesListOffset,
+                 pChipFlavor->pSocShmoo->ShmooVoltages,
+                 BootArgSh.CoreShmooVoltagesListSize);
+    NvRmMemWrite(hMem, BootArgSh.CoreScaledLimitsListOffset,
+                 pChipFlavor->pSocShmoo->ScaledLimitsList,
+                 BootArgSh.CoreScaledLimitsListSize);
+    NvRmMemWrite(hMem, BootArgSh.OscDoublerListOffset,
+                 pChipFlavor->pSocShmoo->OscDoublerCfgList,
+                 BootArgSh.OscDoublerListSize);
+    NvRmMemWrite(hMem, BootArgSh.SKUedLimitsOffset,
+        pChipFlavor->pSocShmoo->pSKUedLimits, BootArgSh.SKUedLimitsSize);
+
+    if (pChipFlavor->pCpuShmoo)
+    {
+        NvRmMemWrite(hMem, BootArgSh.CpuShmooVoltagesListOffset,
+                     pChipFlavor->pCpuShmoo->ShmooVoltages,
+                     BootArgSh.CpuShmooVoltagesListSize);
+        NvRmMemWrite(hMem, BootArgSh.CpuScaledLimitsOffset,
+                     pChipFlavor->pCpuShmoo->pScaledCpuLimits,
+                     BootArgSh.CpuScaledLimitsSize);
+    }
+
+    // Preserve packed shmoo data buffer, and complete boot arg setting
+    err = NvRmMemHandlePreserveHandle(hMem, &BootArgSh.MemHandleKey);
+    if ( err != NvSuccess )
+    {
+        goto fail;
+    }
+    BootArgSh.Dqsib = pChipFlavor->pSocShmoo->DqsibOffset;
+    BootArgSh.SvopHighSetting = pChipFlavor->pSocShmoo->SvopHighSetting;
+    BootArgSh.SvopLowSetting = pChipFlavor->pSocShmoo->SvopLowSetting;
+    BootArgSh.SvopLowVoltage = pChipFlavor->pSocShmoo->SvopLowVoltage;
+    BootArgSh.CoreCorner = pChipFlavor->corner;
+    BootArgSh.CpuCorner = pChipFlavor->CpuCorner;
+
+    err = NvOsBootArgSet(NvBootArgKey_ChipShmoo, &BootArgSh, sizeof(BootArgSh));
+    if ( err != NvSuccess )
+    {
+        goto fail;
+    }
+    return err;
+
+fail:
+    NvRmMemHandleFree(hMem);
+    return err;
+#else
+    return NvSuccess;
+#endif
+}
+
+/*****************************************************************************/
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_private.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_private.h
new file mode 100644
index 000000000000..fb8bb3b0e781
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_private.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_CLOCKS_LIMITS_PRIVATE_H
+#define INCLUDED_NVRM_CLOCKS_LIMITS_PRIVATE_H
+
+#include "nvrm_power_private.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Maximum supported SoC process corners
+#define NVRM_PROCESS_CORNERS (4)
+
+// Maximum supported core and/or CPU voltage characterization steps
+#define NVRM_VOLTAGE_STEPS (7)
+
+// Minimum required core voltage resolution
+#define NVRM_CORE_RESOLUTION_MV (25)
+
+/// Maximum safe core voltage step
+#define NVRM_SAFE_VOLTAGE_STEP_MV (100)
+
+// Minimum system bus frequency
+#define NVRM_BUS_MIN_KHZ (32)
+
+// Minimum SDRAM bus frequency
+#define NVRM_SDRAM_MIN_KHZ (12000)
+
+// ID used by RM to record clock sources V/F dependencies
+#define NVRM_DEVID_CLK_SRC (1000)
+
+/**
+ * Oscillator (main) clock doubler configuration record
+ */
+typedef struct NvRmOscDoublerConfigRec
+{
+ NvRmFreqKHz OscKHz;
+ NvU32 Taps[NVRM_PROCESS_CORNERS];
+} NvRmOscDoublerConfig;
+
+/**
+ * Module clocks limits arranged according to the HW module IDs.
+ */
+typedef struct NvRmScaledClkLimitsRec
+{
+ NvU32 HwDeviceId;
+ NvU32 SubClockId;
+ NvRmFreqKHz MinKHz;
+ NvRmFreqKHz MaxKHzList[NVRM_VOLTAGE_STEPS];
+} NvRmScaledClkLimits;
+
+/**
+ * Combines maximum limits for modules depended on SoC SKU
+ */
+typedef struct NvRmSKUedLimitsRec
+{
+ NvRmFreqKHz CpuMaxKHz;
+ NvRmFreqKHz AvpMaxKHz;
+ NvRmFreqKHz VdeMaxKHz;
+ NvRmFreqKHz McMaxKHz;
+ NvRmFreqKHz Emc2xMaxKHz;
+ NvRmFreqKHz TDMaxKHz;
+ NvRmFreqKHz DisplayAPixelMaxKHz;
+ NvRmFreqKHz DisplayBPixelMaxKHz;
+ NvRmMilliVolts NominalCoreMv; // for common core rail
+ NvRmMilliVolts NominalCpuMv; // for dedicated CPU rail
+} NvRmSKUedLimits;
+
+/**
+ * Combines SoC frequency/voltage shmoo data
+ * (includes data for CPU on the common core rail)
+ */
+typedef struct NvRmSocShmooRec
+{
+ const NvU32* ShmooVoltages;
+ NvU32 ShmooVmaxIndex;
+
+ const NvRmScaledClkLimits* ScaledLimitsList;
+ NvU32 ScaledLimitsListSize;
+
+ const NvRmSKUedLimits* pSKUedLimits;
+
+ const NvRmOscDoublerConfig* OscDoublerCfgList;
+ NvU32 OscDoublerCfgListSize;
+
+ NvU32 DqsibOffset;
+ NvRmMilliVolts SvopLowVoltage;
+ NvU32 SvopLowSetting;
+ NvU32 SvopHighSetting;
+} NvRmSocShmoo;
+
+/**
+ * Combines frequency/voltage shmoo data for CPU on the dedicated voltage rail
+ * (separated from common SoC core rail)
+ */
+typedef struct NvRmCpuShmooRec
+{
+ const NvU32* ShmooVoltages;
+ NvU32 ShmooVmaxIndex;
+
+ const NvRmScaledClkLimits* pScaledCpuLimits;
+} NvRmCpuShmoo;
+
+/**
+ * Combines chip SKU and process corner records with shmoo data
+ */
+typedef struct NvRmChipFlavorRec
+{
+ NvU16 sku;
+
+ NvU16 corner;
+ const NvRmSocShmoo* pSocShmoo; // shmoo core rail (may include CPU)
+
+ NvU16 CpuCorner;
+ const NvRmCpuShmoo* pCpuShmoo; // shmoo dedicated CPU rail (NULL if none)
+} NvRmChipFlavor;
+
+/**
+ * Combines module clock frequency limits
+ */
+typedef struct NvRmModuleClockLimitsRec
+{
+ NvRmFreqKHz MinKHz;
+ NvRmFreqKHz MaxKHz;
+} NvRmModuleClockLimits;
+
+/**
+ * Initializes module clock limits table.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return A pointer to the module clock limits table
+ */
+const NvRmModuleClockLimits*
+NvRmPrivClockLimitsInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Gets list of maximum frequencies for the specified module clock in
+ * ascending order of scaling voltage levels.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module The targeted module ID.
+ * @param pListSize A pointer to a variable filled with list size (i.e.,
+ * number of scaling voltage levels)
+ *
+ * @return Pointer to the frequencies list (NULL if the module is not present,
+ * or the list does not exist)
+ */
+const NvRmFreqKHz*
+NvRmPrivModuleVscaleGetMaxKHzList(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvU32* pListSize);
+
+/**
+ * Gets core voltage level required for operation of the specified module
+ * at the specified frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param Module The targeted module ID.
+ * @param FreqKHz The targeted module frequency in kHz.
+ *
+ * @return Core voltage level in mV.
+ */
+NvRmMilliVolts
+NvRmPrivModuleVscaleGetMV(
+ NvRmDeviceHandle hRmDevice,
+ NvRmModuleID Module,
+ NvRmFreqKHz FreqKHz);
+
+/**
+ * Gets minimum core voltage level required for operation of all non-DFS
+ * modules at current frequencies.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Core voltage level in mV.
+ */
+NvRmMilliVolts
+NvRmPrivModulesGetOperationalMV(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Gets minimum core voltage level required to use module clock source with
+ * specified frequency.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Core voltage level in mV.
+ */
+NvRmMilliVolts
+NvRmPrivSourceVscaleGetMV(NvRmDeviceHandle hRmDevice, NvRmFreqKHz FreqKHz);
+
+/**
+ * Gets SoC nominal core voltage.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return Nominal core voltage in mV.
+ */
+NvRmMilliVolts
+NvRmPrivGetNominalMV(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Gets number of delay taps for Oscillator Doubler.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param OscKHz Oscillator (main) frequency in KHz.
+ * @param pTaps A pointer to the variable, filled with number of delay taps.
+ *
+ * @return NvSuccess if the specified oscillator frequency is supported, and
+ * NvError_NotSupported, otherwise.
+ */
+NvError
+NvRmPrivGetOscDoublerTaps(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz OscKHz,
+ NvU32* pTaps);
+
+/**
+ * Gets RAM SVOP low voltage parameters.
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pSvopLowMv A pointer to a variable filled with SVOP low voltage
+ * threshold in mv.
+ * @param pSvopLvSetting A pointer to a variable filled with SVOP low voltage
+ * settings.
+ * @param pSvopHvSetting A pointer to a variable filled with SVOP high voltage
+ * settings.
+ */
+void
+NvRmPrivGetSvopParameters(
+ NvRmDeviceHandle hRmDevice,
+ NvRmMilliVolts* pSvopLowMv,
+ NvU32* pSvopLvSetting,
+ NvU32* pSvopHvSetting);
+
+/**
+ * Gets 32-bit offset to ODM EMC DQSIB settings.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return DQSIB offset.
+ */
+NvU32
+NvRmPrivGetEmcDqsibOffset(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Verifies if SoC has dedicated CPU voltage rail.
+ *
+ * @param hRmDevice The RM device handle.
+ *
+ * @return NV_TRUE if SoC has dedicated CPU voltage rail,
+ * and NV_FALSE if CPU is on common SoC core rail.
+ */
+NvBool NvRmPrivIsCpuRailDedicated(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Initializes SoC characterization data base
+ *
+ * @param hRmDevice The RM device handle.
+ * @param pChipFlavor a pointer to the chip "flavor" structure
+ * that this function fills in
+ *
+ * @return NvSuccess if completed successfully, or NvError_NotSupported,
+ * otherwise.
+ */
+NvError
+NvRmPrivChipShmooDataInit(
+ NvRmDeviceHandle hRmDevice,
+ NvRmChipFlavor* pChipFlavor);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_CLOCKS_LIMITS_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_stub.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_stub.c
new file mode 100644
index 000000000000..a866548059e6
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_clocks_limits_stub.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_clocks.h"
+
+// Weak stub: a chip-specific implementation, when linked in, overrides this
+// and fills in the chip flavor; otherwise shmoo data is reported unavailable.
+NV_WEAK NvError
+NvRmPrivChipShmooDataInit(
+    NvRmDeviceHandle hRmDevice,
+    NvRmChipFlavor* pChipFlavor)
+{
+    return NvError_NotSupported;
+}
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.c
new file mode 100644
index 000000000000..5e8f30f0f078
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_configuration.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvutil.h"
+
+/**
+ * Writes the compile-time default value of every configuration variable in
+ * the map into the runtime configuration structure.
+ *
+ * @param map NULL-name-terminated configuration map (name, type, default,
+ *  byte offset into the runtime structure).
+ * @param cfg The runtime configuration structure to initialize.
+ *
+ * @return Always NvSuccess.
+ *
+ * NOTE(review): field addresses are computed by casting pointers through
+ * NvU32 - assumes 32-bit pointers (original ARM target); not 64-bit safe.
+ */
+NvError
+NvRmPrivGetDefaultCfg( NvRmCfgMap *map, void *cfg )
+{
+    NvU32 i;
+
+    /* Configure configuration variable defaults */
+    for( i = 0; map[i].name; i++ )
+    {
+        if( map[i].type == NvRmCfgType_Char )
+        {
+            *(char*)((NvU32)cfg + (NvU32)map[i].offset) =
+                (char)(NvU32)map[i].initial;
+            NV_DEBUG_PRINTF(( "Default: %s=%c\n", map[i].name,
+                (char)(NvU32)map[i].initial));
+        }
+        else if( map[i].type == NvRmCfgType_String )
+        {
+            /* clamp so the copy leaves room for the NUL terminator */
+            const char *val = (const char *)map[i].initial;
+            NvU32 len = NvOsStrlen( val );
+            if( len >= NVRM_CFG_MAXLEN )
+            {
+                len = NVRM_CFG_MAXLEN - 1;
+            }
+
+            NvOsStrncpy( (char *)(NvU32)cfg + (NvU32)map[i].offset, val, len );
+            NV_DEBUG_PRINTF(("Default: %s=%s\n", map[i].name, val));
+        }
+        else
+        {
+            /* Decimal and Hex defaults are both stored as raw NvU32 */
+            *(NvU32*)((NvU32)cfg + (NvU32)map[i].offset) =
+                (NvU32)map[i].initial;
+            if( map[i].type == NvRmCfgType_Hex )
+            {
+                NV_DEBUG_PRINTF(("Default: %s=0x%08x\n", map[i].name,
+                    (NvU32)map[i].initial));
+            }
+            else
+            {
+                NV_DEBUG_PRINTF(("Default: %s=%d\n", map[i].name,
+                    (NvU32)map[i].initial));
+            }
+        }
+    }
+
+    return NvSuccess;
+}
+
+/**
+ * Reads configuration variable overrides from the OS environment and stores
+ * the parsed values into the runtime configuration structure. Variables not
+ * present in the environment keep the defaults set by NvRmPrivGetDefaultCfg.
+ *
+ * @param map NULL-name-terminated configuration map.
+ * @param cfg The runtime configuration structure (already initialized with
+ *  NvRmPrivGetDefaultCfg).
+ *
+ * @return Always NvSuccess (missing variables are not an error).
+ */
+NvError
+NvRmPrivReadCfgVars( NvRmCfgMap *map, void *cfg )
+{
+    NvU32 tmp;
+    NvU32 i;
+    char val[ NVRM_CFG_MAXLEN ];
+    NvError err;
+
+    /* the last cfg var entry is all zeroes */
+    /* Fixed: loop condition was "i < (NvU32)map[i].name", comparing the
+     * index against a pointer value cast to NvU32; test the NULL-name
+     * sentinel directly, as NvRmPrivGetDefaultCfg does. */
+    for( i = 0; map[i].name; i++ )
+    {
+        err = NvOsGetConfigString( map[i].name, val, NVRM_CFG_MAXLEN );
+        if( err != NvSuccess )
+        {
+            /* no config var set, try the next one */
+            continue;
+        }
+
+        /* parse the config var and print it */
+        switch( map[i].type ) {
+        case NvRmCfgType_Hex:
+        {
+            /* Fixed: a stray "tmp = 0;" discarded the parsed value, forcing
+             * every hex override to zero. */
+            char *end = val + NvOsStrlen( val );
+            tmp = NvUStrtoul( val, &end, 16 );
+            *(NvU32*)((NvU32)cfg + (NvU32)map[i].offset) = tmp;
+            NV_DEBUG_PRINTF(("Request: %s=0x%08x\n", map[i].name, tmp));
+            break;
+        }
+        case NvRmCfgType_Char:
+            *(char*)((NvU32)cfg + (NvU32)map[i].offset) = val[0];
+            NV_DEBUG_PRINTF(("Request: %s=%c\n", map[i].name, val[0]));
+            break;
+        case NvRmCfgType_Decimal:
+        {
+            /* Fixed: same stray "tmp = 0;" removed for decimal overrides. */
+            char *end = val + NvOsStrlen( val );
+            tmp = NvUStrtoul( val, &end, 10 );
+            *(NvU32*)((NvU32)cfg + (NvU32)map[i].offset) = tmp;
+            NV_DEBUG_PRINTF(("Request: %s=%d\n", map[i].name, tmp));
+            break;
+        }
+        case NvRmCfgType_String:
+        {
+            NvU32 len = NvOsStrlen( val );
+            if( len >= NVRM_CFG_MAXLEN )
+            {
+                len = NVRM_CFG_MAXLEN - 1;
+            }
+            NvOsMemset( (char *)(NvU32)cfg + (NvU32)map[i].offset, 0,
+                NVRM_CFG_MAXLEN );
+            NvOsStrncpy( (char *)(NvU32)cfg + (NvU32)map[i].offset, val, len );
+            NV_DEBUG_PRINTF(("Request: %s=%s\n", map[i].name, val));
+            break;
+        }
+        default:
+            NV_ASSERT(!" Illegal RM Configuration type. ");
+        }
+    }
+
+    return NvSuccess;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.h
new file mode 100644
index 000000000000..f5b3ff0e1ae8
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_configuration.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_CONFIGURATION_H
+#define INCLUDED_NVRM_CONFIGURATION_H
+
+#include "nvcommon.h"
+#include "nverror.h"
+
+/**
+ * The RM configuration variables are represented by two structures:
+ * a configuration map, which lists all of the variables, their default
+ * values and types, and a struct of strings, which holds the runtime value of
+ * the variables. The map holds the index into the runtime structure.
+ *
+ */
+
+/**
+ * The configuration variable type.
+ */
+typedef enum
+{
+ /* String should be parsed as a decimal */
+ NvRmCfgType_Decimal = 0,
+
+ /* String should be parsed as a hexadecimal */
+ NvRmCfgType_Hex = 1,
+
+ /* String should be parsed as a character */
+ NvRmCfgType_Char = 2,
+
+ /* String used as-is. */
+ NvRmCfgType_String = 3,
+} NvRmCfgType;
+
+/**
+ * The configuration map (all possible variables). The map must be
+ * null terminated. Each Rm instance (for each chip) can/will have
+ * different configuration maps.
+ */
+typedef struct NvRmCfgMap_t
+{
+ const char *name;
+ NvRmCfgType type;
+ void *initial; /* default value of the variable */
+ void *offset; /* the index into the string structure */
+} NvRmCfgMap;
+
+/* helper macro for generating the offset for the map */
+#define STRUCT_OFFSET( s, e ) (void *)(&(((s*)0)->e))
+
+/* maximum size of a configuration variable */
+#define NVRM_CFG_MAXLEN NVOS_PATH_MAX
+
+/**
+ * get the default configuration variables.
+ *
+ * @param map The configuration map
+ * @param cfg The configuration runtime values
+ */
+NvError
+NvRmPrivGetDefaultCfg( NvRmCfgMap *map, void *cfg );
+
+/**
+ * get requested configuration.
+ *
+ * @param map The configuration map
+ * @param cfg The configuration runtime values
+ *
+ * Note: 'cfg' should have already been initialized with
+ * NvRmPrivGetDefaultCfg() before calling this.
+ */
+NvError
+NvRmPrivReadCfgVars( NvRmCfgMap *map, void *cfg );
+
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap.h
new file mode 100644
index 000000000000..468dc494caed
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_HEAP_H
+#define NVRM_HEAP_H
+
+#include "nvrm_memmgr.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+typedef NvRmPhysAddr (*NvRmHeapAlloc)(NvU32 size);
+typedef void (*NvRmHeapFree)(NvRmPhysAddr);
+
+// Descriptor for one physically-contiguous RM heap region.
+typedef struct NvRmPrivHeapRec
+{
+    NvRmHeap heap;                 // which RM heap kind this descriptor represents
+    NvRmPhysAddr PhysicalAddress;  // physical base address of the managed region
+    NvU32 length;                  // size of the region in bytes
+
+} NvRmPrivHeap;
+
+
+void
+NvRmPrivPreservedMemHandleInit(NvRmDeviceHandle hRm);
+
+NvRmPrivHeap *
+NvRmPrivHeapCarveoutInit(NvU32 length,
+ NvRmPhysAddr base);
+
+void
+NvRmPrivHeapCarveoutDeinit(void);
+
+NvError
+NvRmPrivHeapCarveoutPreAlloc(NvRmPhysAddr Address, NvU32 Length);
+
+NvError
+NvRmPrivHeapCarveoutAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr);
+
+void
+NvRmPrivHeapCarveoutFree(NvRmPhysAddr addr);
+
+void *
+NvRmPrivHeapCarveoutMemMap(NvRmPhysAddr base, NvU32 length, NvOsMemAttribute attribute);
+
+void
+NvRmPrivHeapCarveoutGetInfo(NvU32 *CarveoutPhysBase,
+ void **pCarveout,
+ NvU32 *CarveoutSize);
+
+NvS32
+NvRmPrivHeapCarveoutMemoryUsed(void);
+
+NvS32
+NvRmPrivHeapCarveoutLargestFreeBlock(void);
+
+/**
+ * @note Not necessarily the same as the CarveoutSize returned by
+ * NvRmPrivHeapCarveoutGetInfo(). Has no dependency on the
+ * carveout being mapped in.
+ */
+NvS32
+NvRmPrivHeapCarveoutTotalSize(void);
+
+NvRmPrivHeap *
+NvRmPrivHeapIramInit(NvU32 length,
+ NvRmPhysAddr base);
+
+void
+NvRmPrivHeapIramDeinit(void);
+
+NvError
+NvRmPrivHeapIramAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr);
+
+NvError
+NvRmPrivHeapIramPreAlloc(NvRmPhysAddr Address, NvU32 Length);
+
+void
+NvRmPrivHeapIramFree(NvRmPhysAddr addr);
+
+void *
+NvRmPrivHeapIramMemMap(NvRmPhysAddr base, NvU32 length, NvOsMemAttribute attribute);
+
+
+// -- GART --
+
+#define GART_PAGE_SIZE (4096)
+#define GART_MAX_PAGES (4096)
+
+/**
+ * Initialize the GART heap. This identifies the GART heap's base address
+ * and total size to the internal heap manager, so that it may allocate
+ * pages appropriately.
+ *
+ * @param hDevice An RM device handle.
+ * Size of the GART heap (bytes) and Base address of the GART heap space
+ * are in GartMemoryInfo substructure of hDevice
+ *
+ * @retval Pointer to the heap data structure, with updated values.
+ */
+NvRmPrivHeap *
+NvRmPrivHeapGartInit(NvRmDeviceHandle hDevice);
+
+void
+NvRmPrivHeapGartDeinit(void);
+
+/**
+ * Allocate GART storage space of the specified size (in units of GART_PAGE_SIZE).
+ * Alignment is handled internally by this API, since it must align with the
+ * GART_PAGE_SIZE. This API also updates the GART registers and returns the base
+ * address pointer of the space allocated within the GART heap.
+ *
+ * @see NvRmPrivHeapGartFree()
+ *
+ * @param hDevice An RM device handle.
+ * @param pPhysAddrArray Contains an array of page addresses. This array should
+ * be created using an NVOS call that acquires the underlying memory address
+ * for each page to be mapped by the GART.
+ * @param NumberOfPages The size (in pages, not bytes) of mapping requested. Must
+ * be greater than 0.
+ * @param PAddr Points to variable that will be updated with the base address of
+ * the next available GART page.
+ *
+ * @retval NvSuccess if the mapping succeeded; *PAddr then holds the base
+ * address of the first available GART page of the requested size.
+ */
+NvError
+NvRmPrivAp15HeapGartAlloc(
+ NvRmDeviceHandle hDevice,
+ NvOsPageAllocHandle hPageHandle,
+ NvU32 NumberOfPages,
+ NvRmPhysAddr *PAddr);
+
+NvError
+NvRmPrivAp20HeapGartAlloc(
+ NvRmDeviceHandle hDevice,
+ NvOsPageAllocHandle hPageHandle,
+ NvU32 NumberOfPages,
+ NvRmPhysAddr *PAddr);
+
+/**
+ * Free the specified GART memory pages.
+ *
+ * @see NvRmPrivHeapGartAlloc()
+ *
+ * @param hDevice An RM device handle.
+ * @param addr Base address (GART space) of the memory page(s) to free.
+ * NULL address pointers are ignored.
+ * @param NumberOfPages The size (in pages, not bytes) of mapping to free.
+ * This needs to match the size indicated when allocated.
+ */
+void
+NvRmPrivAp15HeapGartFree(
+ NvRmDeviceHandle hDevice,
+ NvRmPhysAddr addr,
+ NvU32 NumberOfPages);
+
+void
+NvRmPrivAp20HeapGartFree(
+ NvRmDeviceHandle hDevice,
+ NvRmPhysAddr addr,
+ NvU32 NumberOfPages);
+
+/**
+ * Suspend GART.
+ */
+void
+NvRmPrivAp15GartSuspend(NvRmDeviceHandle hDevice);
+
+void
+NvRmPrivAp20GartSuspend(NvRmDeviceHandle hDevice);
+
+/**
+ * Resume GART.
+ */
+void
+NvRmPrivAp15GartResume(NvRmDeviceHandle hDevice);
+
+void
+NvRmPrivAp20GartResume(NvRmDeviceHandle hDevice);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c
new file mode 100644
index 000000000000..e5751a8168c8
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_carveout.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_heap.h"
+#include "nvrm_heap_simple.h"
+#include "nvrm_hardware_access.h"
+
+static NvRmPrivHeap gs_CarveoutHeap;
+static NvRmPhysAddr gs_CarveoutBaseAddr;
+static void *gs_CarveoutVaddr;
+static NvBool gs_CarveoutGloballyMapped;
+
+static NvRmHeapSimple gs_CarveoutAllocator;
+
+
+// Allocate 'size' bytes from the carveout heap with the requested power-of-two
+// alignment. On success *PAddr receives the physical address of the block.
+NvError NvRmPrivHeapCarveoutAlloc(
+    NvU32 size,
+    NvU32 align,
+    NvRmPhysAddr *PAddr)
+{
+    NvError status;
+
+    status = NvRmPrivHeapSimpleAlloc(&gs_CarveoutAllocator, size, align, PAddr);
+    return status;
+}
+
+// Claim the fixed physical range [Address, Address+Length) from the carveout.
+NvError NvRmPrivHeapCarveoutPreAlloc(NvRmPhysAddr Address, NvU32 Length)
+{
+    NvError status;
+
+    status = NvRmPrivHeapSimplePreAlloc(&gs_CarveoutAllocator, Address, Length);
+    return status;
+}
+
+// Return a previously allocated carveout block to the allocator.
+// Unknown or already-free addresses are ignored by the underlying allocator.
+void NvRmPrivHeapCarveoutFree(NvRmPhysAddr addr)
+{
+    NvRmPrivHeapSimpleFree(&gs_CarveoutAllocator, addr);
+}
+
+// Total bytes currently allocated out of the carveout heap.
+NvS32 NvRmPrivHeapCarveoutMemoryUsed(void)
+{
+    NvS32 used;
+
+    used = NvRmPrivHeapSimpleMemoryUsed(&gs_CarveoutAllocator);
+    return used;
+}
+
+// Size in bytes of the largest contiguous free block in the carveout heap.
+NvS32 NvRmPrivHeapCarveoutLargestFreeBlock(void)
+{
+    NvS32 largest;
+
+    largest = NvRmPrivHeapSimpleLargestFreeBlock(&gs_CarveoutAllocator);
+    return largest;
+}
+
+// Total size in bytes of the carveout region handed to Init. Valid even
+// when the carveout was never CPU-mapped (unlike CarveoutGetInfo).
+NvS32 NvRmPrivHeapCarveoutTotalSize(void)
+{
+    return (NvS32)gs_CarveoutHeap.length;
+}
+
+
+// Translate a physical carveout address range into a CPU virtual pointer.
+// Returns NULL when the carveout was never mapped; asserts (and returns
+// NULL) when the requested range falls outside the carveout region.
+void *NvRmPrivHeapCarveoutMemMap(
+    NvRmPhysAddr base,
+    NvU32 length,
+    NvOsMemAttribute attribute)
+{
+    NvU32 first = base - gs_CarveoutBaseAddr;
+    NvU32 last  = first + length - 1;
+
+    if (!gs_CarveoutVaddr)
+        return NULL;
+
+    NV_ASSERT(length != 0);
+
+    // Both ends of the request must land inside the carveout region.
+    if (first < gs_CarveoutHeap.length && last < gs_CarveoutHeap.length)
+        return (void *)((NvUPtr)gs_CarveoutVaddr + first);
+
+    NV_ASSERT(!"Attempt to map something that is not part of the carveout");
+    return NULL;
+}
+
+
+/**
+ * Initialize the carveout heap allocator over [base, base+length).
+ *
+ * On non-kernel builds the region is also CPU-mapped: first with
+ * NVOS_MEM_GLOBAL_ADDR (shared mapping), falling back to a private
+ * mapping when that fails. On the Linux-kernel (non-x86) build the
+ * mapping block is compiled out, so vAddr stays NULL and only the
+ * allocator bookkeeping is set up.
+ *
+ * @param length Size of the carveout region in bytes.
+ * @param base   Physical base address of the region.
+ * @returns Pointer to the static heap descriptor, or NULL on failure.
+ */
+NvRmPrivHeap *NvRmPrivHeapCarveoutInit(NvU32 length, NvRmPhysAddr base)
+{
+    NvError err;
+    NvBool bGloballyMapped = NV_FALSE;
+    void *vAddr = NULL;
+
+#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
+    /* try to map the memory, if we can't map it then bail out */
+    err = NvRmPhysicalMemMap(base, length,
+        NVOS_MEM_READ_WRITE | NVOS_MEM_GLOBAL_ADDR,
+        NvOsMemAttribute_Uncached, &vAddr);
+
+    if (err == NvSuccess)
+    {
+        bGloballyMapped = NV_TRUE;
+    }
+    else
+    {
+        // try again to map carveout, but with global flag gone
+        err = NvRmPhysicalMemMap(base, length, NVOS_MEM_READ_WRITE,
+            NvOsMemAttribute_Uncached, &vAddr);
+
+        if (err != NvSuccess)
+            return NULL;
+    }
+#endif
+
+    // Hand the physical range over to the simple first-fit allocator.
+    err = NvRmPrivHeapSimple_HeapAlloc(base, length, &gs_CarveoutAllocator);
+
+    if (err != NvSuccess)
+    {
+        // Undo the CPU mapping (no-op on the kernel build where vAddr is NULL).
+        if (vAddr)
+            NvRmPhysicalMemUnmap(vAddr, length);
+        return NULL;
+    }
+
+    // Publish state in the module statics consumed by the other
+    // NvRmPrivHeapCarveout* entry points.
+    gs_CarveoutHeap.heap = NvRmHeap_ExternalCarveOut;
+    gs_CarveoutHeap.length = length;
+    gs_CarveoutHeap.PhysicalAddress = base;
+    gs_CarveoutBaseAddr = base;
+    gs_CarveoutVaddr = vAddr;
+    gs_CarveoutGloballyMapped = bGloballyMapped;
+
+    return &gs_CarveoutHeap;
+}
+
+
+// Tear down the carveout heap: unmap the CPU mapping (if any), release the
+// allocator bookkeeping, and reset ALL module-static state so a later
+// NvRmPrivHeapCarveoutGetInfo() cannot report a stale "globally mapped"
+// carveout.
+void NvRmPrivHeapCarveoutDeinit(void)
+{
+    // deinit the carveout allocator
+    if (gs_CarveoutVaddr)
+    {
+        NvRmPhysicalMemUnmap(gs_CarveoutVaddr, gs_CarveoutHeap.length);
+        gs_CarveoutVaddr = NULL;
+    }
+
+    NvRmPrivHeapSimple_HeapFree(&gs_CarveoutAllocator);
+    NvOsMemset(&gs_CarveoutHeap, 0, sizeof(gs_CarveoutHeap));
+    NvOsMemset(&gs_CarveoutAllocator, 0, sizeof(gs_CarveoutAllocator));
+
+    // These were previously left stale across deinit: GetInfo() keys off
+    // gs_CarveoutGloballyMapped and would have taken the "mapped" branch.
+    gs_CarveoutBaseAddr = 0;
+    gs_CarveoutGloballyMapped = NV_FALSE;
+}
+
+
+// Report the carveout's physical base, CPU virtual base and size.
+// All three outputs are zeroed unless the carveout has a global
+// (cross-process) CPU mapping.
+void NvRmPrivHeapCarveoutGetInfo(
+    NvU32 *CarveoutPhysBase,
+    void **pCarveout,
+    NvU32 *CarveoutSize)
+{
+    if (!gs_CarveoutGloballyMapped)
+    {
+        *CarveoutPhysBase = 0;
+        *pCarveout = NULL;
+        *CarveoutSize = 0;
+        return;
+    }
+
+    *CarveoutPhysBase = gs_CarveoutHeap.PhysicalAddress;
+    *pCarveout = gs_CarveoutVaddr;
+    *CarveoutSize = gs_CarveoutHeap.length;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c
new file mode 100644
index 000000000000..6f1b0cce7514
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_iram.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_heap.h"
+#include "nvrm_heap_simple.h"
+#include "nvrm_hardware_access.h"
+
+
+static NvRmPrivHeap gs_IramHeap;
+static NvUPtr gs_IramBaseAddr;
+static void *gs_IramVaddr;
+static NvRmHeapSimple gs_IramAllocator;
+
+// Allocate 'size' bytes of IRAM with the requested power-of-two alignment;
+// on success *PAddr receives the physical address of the block.
+NvError NvRmPrivHeapIramAlloc(NvU32 size, NvU32 align, NvRmPhysAddr *PAddr)
+{
+    return NvRmPrivHeapSimpleAlloc(&gs_IramAllocator, size, align, PAddr);
+}
+
+// Claim the fixed physical range [Address, Address+Length) from IRAM.
+NvError NvRmPrivHeapIramPreAlloc(NvRmPhysAddr Address, NvU32 Length)
+{
+    NvError status;
+
+    status = NvRmPrivHeapSimplePreAlloc(&gs_IramAllocator, Address, Length);
+    return status;
+}
+
+// Return a previously allocated IRAM block to the allocator.
+// Unknown or already-free addresses are ignored by the underlying allocator.
+void NvRmPrivHeapIramFree(NvRmPhysAddr addr)
+{
+    NvRmPrivHeapSimpleFree(&gs_IramAllocator, addr);
+}
+
+/**
+ * Initialize the IRAM heap allocator over [base, base+length).
+ *
+ * On non-kernel builds the region is CPU-mapped uncached; on the
+ * Linux-kernel (non-x86) build the mapping block is compiled out, so
+ * vAddr stays NULL and only the allocator bookkeeping is set up.
+ *
+ * @param length Size of the IRAM region in bytes.
+ * @param base   Physical base address of the region.
+ * @returns Pointer to the static heap descriptor, or NULL on failure.
+ */
+NvRmPrivHeap *NvRmPrivHeapIramInit(NvU32 length, NvRmPhysAddr base)
+{
+    void *vAddr = NULL;
+    NvError err;
+
+#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
+    /* try to map the memory, if we can't map it then bail out */
+    err = NvRmPhysicalMemMap(base, length, NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached, &vAddr);
+    if (err != NvSuccess)
+        return NULL;
+#endif
+
+    // Hand the physical range over to the simple first-fit allocator.
+    err = NvRmPrivHeapSimple_HeapAlloc(base, length, &gs_IramAllocator);
+
+    if (err != NvSuccess)
+    {
+        // Undo the CPU mapping (no-op on the kernel build where vAddr is NULL).
+        if (vAddr)
+            NvRmPhysicalMemUnmap(vAddr, length);
+        return NULL;
+    }
+
+    // Publish state in the module statics consumed by the other
+    // NvRmPrivHeapIram* entry points.
+    gs_IramHeap.heap = NvRmHeap_IRam;
+    gs_IramHeap.length = length;
+    gs_IramHeap.PhysicalAddress = base;
+    gs_IramBaseAddr = (NvUPtr)base;
+    gs_IramVaddr = vAddr;
+
+    return &gs_IramHeap;
+}
+
+// Tear down the IRAM heap: unmap any CPU mapping, release the simple
+// allocator's bookkeeping and reset all module-static state.
+void NvRmPrivHeapIramDeinit(void)
+{
+    // deinit the IRAM allocator (previous comment said "carveout":
+    // copy/paste from nvrm_heap_carveout.c)
+    if (gs_IramVaddr)
+    {
+        NvRmPhysicalMemUnmap(gs_IramVaddr, gs_IramHeap.length);
+        gs_IramVaddr = NULL;
+    }
+
+    NvRmPrivHeapSimple_HeapFree(&gs_IramAllocator);
+    NvOsMemset(&gs_IramHeap, 0, sizeof(gs_IramHeap));
+    NvOsMemset(&gs_IramAllocator, 0, sizeof(gs_IramAllocator));
+
+    // Previously left stale across deinit/init cycles; a subsequent
+    // IramMemMap() would have used the old base for offset math.
+    gs_IramBaseAddr = 0;
+}
+
+// Translate a physical IRAM address range into a CPU virtual pointer.
+// Returns NULL when IRAM was never mapped; asserts (and returns NULL)
+// when the requested range lies outside the IRAM region.
+void *NvRmPrivHeapIramMemMap(
+    NvRmPhysAddr base,
+    NvU32 length,
+    NvOsMemAttribute attribute)
+{
+    NvU32 first = base - gs_IramBaseAddr;
+    NvU32 last  = first + length - 1;
+
+    NV_ASSERT(length != 0);
+
+    if (!gs_IramVaddr)
+        return NULL;
+
+    // Both ends of the request must fall inside the mapped region.
+    if (first < gs_IramHeap.length && last < gs_IramHeap.length)
+        return (void *)((NvUPtr)gs_IramVaddr + first);
+
+    NV_ASSERT(!"Attempt to map something that is not part of the iram");
+    return NULL;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c
new file mode 100644
index 000000000000..7029cb13e560
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* This is an extremely simplistic memory allocator where the bookkeeping data
+ * is kept out of band from the allocated memory. It's intended to be used
+ * for framebuffer/carveout type allocations.
+ *
+ * Implementation is a simple first fit. Book-keeping is kept as a singly
+ * linked list, which means allocation time is O(n) w.r.t. the total number of
+ * allocations.
+ */
+
+
+#include "nvrm_heap_simple.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+#define INITIAL_GROW_SIZE 8
+#define MAX_GROW_SIZE 512
+#define INVALID_INDEX (NvU32)-1
+
+#define DEBUG_HEAP 0
+
+#if DEBUG_HEAP
+
+/**
+ * Debug-only consistency check. Marks every node reachable from the
+ * block list (touched=1) and the spare list (touched=2), asserting that
+ * both lists stay inside RawBlockArray, never share a node, and together
+ * cover the entire array. Compiles to nothing unless DEBUG_HEAP is set.
+ */
+static void
+SanityCheckHeap(NvRmHeapSimple *pHeap)
+{
+    NvU32 index;
+
+    // first clear all the touched bits
+    for (index = 0; index < pHeap->ArraySize; ++index)
+    {
+        pHeap->RawBlockArray[index].touched = 0;
+    }
+
+
+    // Walk the BlockArray
+    index = pHeap->BlockIndex;
+    for (;;)
+    {
+        if (index == INVALID_INDEX)
+            break;
+
+        // make sure we're not off in the weeds.
+        NV_ASSERT( index < pHeap->ArraySize );
+        NV_ASSERT( pHeap->RawBlockArray[index].touched == 0);
+
+        pHeap->RawBlockArray[index].touched = 1;
+        index = pHeap->RawBlockArray[index].NextIndex;
+    }
+
+
+    // Walk the SpareArray
+    index = pHeap->SpareBlockIndex;
+    for (;;)
+    {
+        if (index == INVALID_INDEX)
+            break;
+
+        // make sure we're not off in the weeds.
+        NV_ASSERT( index < pHeap->ArraySize );
+        NV_ASSERT( pHeap->RawBlockArray[index].touched == 0);
+
+        pHeap->RawBlockArray[index].touched = 2;
+        index = pHeap->RawBlockArray[index].NextIndex;
+    }
+
+
+    // check that all blocks get touched.
+    for (index = 0; index < pHeap->ArraySize; ++index)
+    {
+        NV_ASSERT(pHeap->RawBlockArray[index].touched != 0);
+    }
+}
+
+#else
+// Release builds: the check disappears entirely.
+# define SanityCheckHeap(a)
+#endif
+
+
+
+/**
+ * Initialize a simple first-fit heap managing [base, base+size).
+ *
+ * Block 0 becomes the single free block covering the whole region;
+ * the remaining bookkeeping entries form the spare list.
+ *
+ * @param base     Physical base address the heap hands out.
+ * @param size     Size in bytes of the managed region; must be > 0.
+ * @param pNewHeap Caller-provided heap struct to initialize.
+ *
+ * @retval NvSuccess on success; NvError_InsufficientMemory when the
+ *         bookkeeping array or the mutex cannot be created. On failure
+ *         the heap holds no dangling pointers.
+ */
+NvError NvRmPrivHeapSimple_HeapAlloc(NvRmPhysAddr base, NvU32 size, NvRmHeapSimple *pNewHeap)
+{
+    int i;
+    NvError err = NvError_InsufficientMemory;
+
+    NV_ASSERT(pNewHeap != NULL);
+    NV_ASSERT(size > 0);
+
+    NvOsMemset(pNewHeap, 0, sizeof(*pNewHeap));
+
+    pNewHeap->base = base;
+    pNewHeap->size = size;
+    pNewHeap->ArraySize = INITIAL_GROW_SIZE;
+
+    pNewHeap->RawBlockArray = NvOsAlloc(sizeof(NvRmHeapSimpleBlock) * INITIAL_GROW_SIZE);
+    if (!pNewHeap->RawBlockArray)
+    {
+        err = NvError_InsufficientMemory;
+        goto fail;
+    }
+    NvOsMemset(pNewHeap->RawBlockArray, 0, sizeof(NvRmHeapSimpleBlock) * INITIAL_GROW_SIZE);
+
+    // Chain every entry forward: entry i points at i+1, last is terminated.
+    for (i = 0; i < INITIAL_GROW_SIZE; ++i)
+    {
+        pNewHeap->RawBlockArray[i].NextIndex = i + 1;
+    }
+    pNewHeap->RawBlockArray[i-1].NextIndex = INVALID_INDEX;
+
+    // Block 0 = the one free block spanning the region; 1..N-1 = spares.
+    pNewHeap->BlockIndex = 0;
+    pNewHeap->SpareBlockIndex = 1;
+
+    pNewHeap->RawBlockArray[pNewHeap->BlockIndex].IsFree = NV_TRUE;
+    pNewHeap->RawBlockArray[pNewHeap->BlockIndex].PhysAddr = base;
+    pNewHeap->RawBlockArray[pNewHeap->BlockIndex].size = size;
+    pNewHeap->RawBlockArray[pNewHeap->BlockIndex].NextIndex = INVALID_INDEX;
+
+    err = NvOsMutexCreate(&pNewHeap->mutex);
+    if (err)
+        goto fail;
+
+    SanityCheckHeap(pNewHeap);
+    return NvSuccess;
+
+fail:
+    NvOsFree(pNewHeap->RawBlockArray);
+    // Previously left dangling after the free: a later
+    // NvRmPrivHeapSimple_HeapFree() on the failed heap would double-free.
+    pNewHeap->RawBlockArray = NULL;
+    return err;
+}
+
+
+/**
+ * Release the bookkeeping owned by a simple heap (mutex and block array).
+ * Passing NULL is a no-op.
+ *
+ * @param pHeap Heap previously initialized by NvRmPrivHeapSimple_HeapAlloc
+ */
+void NvRmPrivHeapSimple_HeapFree(NvRmHeapSimple *pHeap)
+{
+    if (!pHeap)
+        return;
+
+    SanityCheckHeap(pHeap);
+    NvOsMutexDestroy(pHeap->mutex);
+    NvOsFree(pHeap->RawBlockArray);
+}
+
+/**
+ * Ensure at least two spare bookkeeping blocks are available, growing the
+ * block array when necessary. Two spares are required because one
+ * allocation may split a free block twice (alignment head + remainder
+ * tail).
+ *
+ * @retval NvSuccess when enough spares exist (possibly after growing);
+ *         NvError_InsufficientMemory when a larger array cannot be
+ *         allocated (existing heap state is left untouched).
+ */
+static NvError NvRmPrivHeapSimpleGrowBlockArray(
+    NvRmHeapSimple *pHeap)
+{
+    NvU32 SpareIndex = pHeap->SpareBlockIndex;
+    NvU32 NumFree = 0;
+    const NvU32 MinFree = 2;
+
+    // Count spares, stopping as soon as we know we have enough.
+    while (SpareIndex!=INVALID_INDEX && NumFree<MinFree)
+    {
+        NumFree++;
+        SpareIndex = pHeap->RawBlockArray[SpareIndex].NextIndex;
+    }
+
+    if (NumFree < MinFree)
+    {
+        NvU32 i;
+        NvU32 NewArraySize;
+        NvU32 GrowSize;
+        NvRmHeapSimpleBlock *NewBlockArray;
+
+        // Grow increment is 150% of the current size, capped at
+        // MAX_GROW_SIZE entries. (An earlier revision grew by a fixed 8,
+        // which the old comment described.) Since ArraySize starts at
+        // INITIAL_GROW_SIZE (8), this always adds at least MinFree blocks
+        // and amortizes the O(n) copy below.
+        GrowSize = pHeap->ArraySize + (pHeap->ArraySize >> 1);
+        GrowSize = NV_MIN(GrowSize, MAX_GROW_SIZE);
+
+        NewArraySize = pHeap->ArraySize + GrowSize;
+        NewBlockArray = NvOsAlloc( sizeof(NvRmHeapSimpleBlock)*NewArraySize);
+        if (!NewBlockArray)
+        {
+            SanityCheckHeap(pHeap);
+            return NvError_InsufficientMemory;
+        }
+        NvOsMemset(NewBlockArray, 0, sizeof(NvRmHeapSimpleBlock)*NewArraySize);
+
+        NvOsMemcpy(NewBlockArray, pHeap->RawBlockArray, sizeof(NvRmHeapSimpleBlock)*pHeap->ArraySize);
+
+        // setup the NextIndex in the new part of the array
+        for (i = pHeap->ArraySize; i < NewArraySize; i++)
+        {
+            NewBlockArray[i].NextIndex = i + 1;
+        }
+
+        // Point the last element of the new array to the old SpareBlockList
+        NewBlockArray[NewArraySize - 1].NextIndex = pHeap->SpareBlockIndex;
+        NvOsFree(pHeap->RawBlockArray);
+
+        // Update all our information
+        pHeap->RawBlockArray = NewBlockArray;
+        pHeap->SpareBlockIndex = pHeap->ArraySize;
+        pHeap->ArraySize = NewArraySize;
+    }
+
+    return NvSuccess;
+}
+
+/**
+ * Claim the specific physical range [Address, Address+Length) from the
+ * heap. The free block containing the range is split at the head and/or
+ * tail so the claimed block covers exactly the requested range.
+ *
+ * @param pHeap   Heap to allocate from.
+ * @param Address Physical start of the range; must be 32-byte aligned.
+ * @param Length  Length in bytes; must be a multiple of 32.
+ *
+ * @retval NvSuccess on success; NvError_NotSupported on misalignment;
+ *         NvError_InsufficientMemory when no free block contains the
+ *         range or bookkeeping cannot grow.
+ */
+NvError NvRmPrivHeapSimplePreAlloc(
+    NvRmHeapSimple *pHeap,
+    NvRmPhysAddr Address,
+    NvU32 Length)
+{
+    NvRmHeapSimpleBlock *pBlock;
+    NvU32 BlockIndex;
+
+    NV_ASSERT(pHeap!=NULL);
+
+    // All preallocated blocks must start at a minimum of a 32B alignment
+    if ((Address & 31) || (Length & 31))
+        return NvError_NotSupported;
+
+    NvOsMutexLock(pHeap->mutex);
+
+    // Reserve the (up to) two spare blocks the splits below may consume.
+    if (NvRmPrivHeapSimpleGrowBlockArray(pHeap)!=NvSuccess)
+    {
+        NvOsMutexUnlock(pHeap->mutex);
+        return NvError_InsufficientMemory;
+    }
+
+    // Iteratively search through all the blocks for the block whose
+    // physical address region contains the requested pre-allocated
+    // region, and which isn't already allocated.
+    for (BlockIndex = pHeap->BlockIndex; BlockIndex!=INVALID_INDEX;
+         BlockIndex = pHeap->RawBlockArray[BlockIndex].NextIndex)
+    {
+        pBlock = &pHeap->RawBlockArray[BlockIndex];
+
+        if (pBlock->PhysAddr<=Address &&
+            (pBlock->PhysAddr+pBlock->size) >= (Address+Length) &&
+            pBlock->IsFree)
+        {
+            // If the free region starts before the preallocated region,
+            // split the free region into two blocks.
+            if (pBlock->PhysAddr < Address)
+            {
+                NvRmHeapSimpleBlock *NewBlock;
+                NvU32 NewBlockIndex;
+
+                // Grab a block off the spare list and link it into place
+                NewBlockIndex = pHeap->SpareBlockIndex;
+                NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
+                pHeap->SpareBlockIndex = NewBlock->NextIndex;
+
+                NewBlock->NextIndex = pBlock->NextIndex;
+                pBlock->NextIndex = NewBlockIndex;
+
+                // Set up the new block
+                NewBlock->IsFree = NV_TRUE;
+                NewBlock->PhysAddr = Address;
+                NewBlock->size = pBlock->size;
+
+                // Shrink the current block to the head fragment; the new
+                // block keeps the remainder starting at Address.
+                pBlock->size = (Address - pBlock->PhysAddr);
+                NewBlock->size -= pBlock->size;
+
+                // Advance to the block we are actually going to allocate out of
+                pBlock = NewBlock;
+            }
+
+            // If the free region extends past the requested range, split
+            // off the tail fragment as a new free block.
+            if ((pBlock->PhysAddr + pBlock->size) > (Address + Length))
+            {
+                NvRmHeapSimpleBlock *NewBlock;
+                NvU32 NewBlockIndex;
+
+                NewBlockIndex = pHeap->SpareBlockIndex;
+                NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
+                pHeap->SpareBlockIndex = NewBlock->NextIndex;
+
+                NewBlock->NextIndex = pBlock->NextIndex;
+                pBlock->NextIndex = NewBlockIndex;
+
+                NewBlock->IsFree = NV_TRUE;
+                NewBlock->PhysAddr = (pBlock->PhysAddr + Length);
+                NewBlock->size = (pBlock->size - Length);
+
+                pBlock->size = Length;
+            }
+
+            // After the splits the block must match the request exactly.
+            NV_ASSERT(pBlock->PhysAddr == Address &&
+                      pBlock->size == Length);
+
+            pBlock->IsFree = NV_FALSE;
+            SanityCheckHeap(pHeap);
+
+            NvOsMutexUnlock(pHeap->mutex);
+            return NvSuccess;
+        }
+    }
+
+    // No free block contains the requested range.
+    SanityCheckHeap(pHeap);
+    NvOsMutexUnlock(pHeap->mutex);
+    return NvError_InsufficientMemory;
+}
+
+
+/**
+ * First-fit allocation of 'size' bytes aligned to 'align' (power of two);
+ * on success *pPAddr receives the physical address of the block.
+ *
+ * A candidate free block may be split twice: once to carve off the
+ * alignment head, once to return the unused tail to the free list.
+ *
+ * NOTE(review): align == 0 passes the power-of-two assert, and
+ * ~(align-1) then masks NewOffset to 0 -- callers presumably always pass
+ * align >= 1; confirm.
+ *
+ * @retval NvSuccess on success; NvError_InsufficientMemory when no free
+ *         block can satisfy the request or bookkeeping cannot grow.
+ */
+NvError NvRmPrivHeapSimpleAlloc(
+    NvRmHeapSimple *pHeap,
+    NvU32 size,
+    NvU32 align,
+    NvRmPhysAddr *pPAddr)
+{
+    NvRmHeapSimpleBlock *pBlock;
+    NvU32 BlockIndex;
+
+    // Must align to a power of two
+    // Alignment offset should be less than the total alignment
+    NV_ASSERT(!(align & (align-1)));
+
+    NV_ASSERT(pHeap != NULL);
+
+    NvOsMutexLock(pHeap->mutex);
+
+    // Reserve the (up to) two spare blocks the splits below may consume.
+    if (NvRmPrivHeapSimpleGrowBlockArray(pHeap)!=NvSuccess)
+    {
+        NvOsMutexUnlock(pHeap->mutex);
+        return NvError_InsufficientMemory;
+    }
+
+    // Scan through the list of blocks
+    for (BlockIndex = pHeap->BlockIndex; BlockIndex != INVALID_INDEX; BlockIndex = pHeap->RawBlockArray[BlockIndex].NextIndex)
+    {
+        NvRmPhysAddr NewOffset;
+        NvU32 ExtraAlignSpace;
+
+        pBlock = &pHeap->RawBlockArray[BlockIndex];
+
+        // Skip blocks that are not free
+        if (!pBlock->IsFree)
+        {
+            continue;
+        }
+
+        // Compute location where this allocation would start in this block, based
+        // on the alignment and range requested
+        NewOffset = pBlock->PhysAddr;
+
+        // Round up to the next multiple of 'align'.
+        NewOffset = (NewOffset + align-1) & ~(align-1);
+        NV_ASSERT(NewOffset >= pBlock->PhysAddr);
+        ExtraAlignSpace = NewOffset - pBlock->PhysAddr;
+
+        // Is the block too small to fit this allocation, including the extra space
+        // required for alignment?
+        if (pBlock->size < (size + ExtraAlignSpace) )
+            continue;
+
+        // Do we need to split this block in two to start the allocation at the proper
+        // alignment?
+        if (ExtraAlignSpace > 0)
+        {
+            NvRmHeapSimpleBlock *NewBlock;
+            NvU32 NewBlockIndex;
+
+            // Grab a block off the spare list and link it into place
+            NewBlockIndex = pHeap->SpareBlockIndex;
+            NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
+            pHeap->SpareBlockIndex = NewBlock->NextIndex;
+
+            NewBlock->NextIndex = pBlock->NextIndex;
+            pBlock->NextIndex = NewBlockIndex;
+
+            // Set up the new block
+            NewBlock->IsFree = NV_TRUE;
+            NewBlock->PhysAddr = pBlock->PhysAddr + ExtraAlignSpace;
+            NewBlock->size = pBlock->size - ExtraAlignSpace;
+
+            // Shrink the current block to match this allocation
+            pBlock->size = ExtraAlignSpace;
+
+            // Advance to the block we are actually going to allocate out of
+            pBlock = NewBlock;
+        }
+
+        // Do we need to split this block into two?
+        if (pBlock->size > size)
+        {
+            NvRmHeapSimpleBlock *NewBlock;
+            NvU32 NewBlockIndex;
+
+            // Grab a block off the spare list and link it into place
+            NewBlockIndex = pHeap->SpareBlockIndex;
+            NewBlock = &pHeap->RawBlockArray[NewBlockIndex];
+            pHeap->SpareBlockIndex = NewBlock->NextIndex;
+            NewBlock->NextIndex = pBlock->NextIndex;
+            pBlock->NextIndex = NewBlockIndex;
+
+            // Set up the new block
+            NewBlock->IsFree = NV_TRUE;
+            NewBlock->PhysAddr = pBlock->PhysAddr + size;
+            NewBlock->size = pBlock->size - size;
+
+            // Shrink the current block to match this allocation
+            pBlock->size = size;
+        }
+
+        NV_ASSERT(pBlock->size == size);
+        pBlock->IsFree = NV_FALSE;
+
+        *pPAddr = pBlock->PhysAddr;
+        SanityCheckHeap(pHeap);
+
+        NvOsMutexUnlock(pHeap->mutex);
+        return NvSuccess;
+    }
+
+    // No free block was large enough.
+    SanityCheckHeap(pHeap);
+    NvOsMutexUnlock(pHeap->mutex);
+    return NvError_InsufficientMemory;
+}
+
+
+
+
+/**
+ * Return the block starting at PhysAddr to the free list, coalescing it
+ * with free neighbours. Unknown or already-free addresses are silently
+ * ignored.
+ *
+ * Fix: the previous code computed &RawBlockArray[INVALID_INDEX]
+ * ((NvU32)-1) before testing the index -- an out-of-bounds address
+ * computation, which is undefined behavior. Indices are now validated
+ * before being used to form an element address.
+ */
+void NvRmPrivHeapSimpleFree(NvRmHeapSimple *pHeap, NvRmPhysAddr PhysAddr)
+{
+    NvRmHeapSimpleBlock *pBlock = NULL;
+    NvU32 BlockIndex;
+    NvU32 PrevIndex = INVALID_INDEX;
+    NvU32 NextIndex;
+
+    NV_ASSERT(pHeap != NULL);
+
+    NvOsMutexLock(pHeap->mutex);
+
+    // Walk the block list looking for the allocation to free, remembering
+    // the predecessor for the merge step below.
+    BlockIndex = pHeap->BlockIndex;
+    while (BlockIndex != INVALID_INDEX)
+    {
+        pBlock = &pHeap->RawBlockArray[BlockIndex];
+        if (pBlock->PhysAddr == PhysAddr)
+            break;
+        PrevIndex = BlockIndex;
+        BlockIndex = pBlock->NextIndex;
+    }
+
+    // The block we're being asked to free didn't exist or was already free
+    if (BlockIndex == INVALID_INDEX || pBlock->IsFree)
+    {
+        SanityCheckHeap(pHeap);
+        NvOsMutexUnlock(pHeap->mutex);
+        return;
+    }
+
+    // This block is now a free block
+    pBlock->IsFree = NV_TRUE;
+
+    // If next block is free, merge the two into one block
+    NextIndex = pBlock->NextIndex;
+    if (NextIndex != INVALID_INDEX)
+    {
+        NvRmHeapSimpleBlock *pNext = &pHeap->RawBlockArray[NextIndex];
+        if (pNext->IsFree)
+        {
+            pBlock->size += pNext->size;
+            pBlock->NextIndex = pNext->NextIndex;
+
+            // Return the absorbed node to the spare list.
+            pNext->NextIndex = pHeap->SpareBlockIndex;
+            pHeap->SpareBlockIndex = NextIndex;
+        }
+    }
+
+    // If previous block is free, merge the two into one block
+    if (PrevIndex != INVALID_INDEX)
+    {
+        NvRmHeapSimpleBlock *pPrev = &pHeap->RawBlockArray[PrevIndex];
+        if (pPrev->IsFree)
+        {
+            pPrev->size += pBlock->size;
+            pPrev->NextIndex = pBlock->NextIndex;
+
+            // Return the absorbed node to the spare list.
+            pBlock->NextIndex = pHeap->SpareBlockIndex;
+            pHeap->SpareBlockIndex = BlockIndex;
+        }
+    }
+    SanityCheckHeap(pHeap);
+    NvOsMutexUnlock(pHeap->mutex);
+}
+
+/**
+ * @returns total bytes held by allocated (non-free) blocks in the heap.
+ */
+NvS32 NvRmPrivHeapSimpleMemoryUsed(NvRmHeapSimple* pHeap)
+{
+    // Fix: the index was declared NvS32 but list indices are NvU32 and are
+    // compared against INVALID_INDEX ((NvU32)-1); keep it unsigned to
+    // avoid the signed/unsigned comparison.
+    NvU32 Index;
+    NvS32 MemUsed = 0;
+
+    NV_ASSERT(pHeap != NULL);
+
+    NvOsMutexLock(pHeap->mutex);
+
+    for (Index = pHeap->BlockIndex; Index != INVALID_INDEX; )
+    {
+        NvRmHeapSimpleBlock* Block = &pHeap->RawBlockArray[Index];
+
+        if (!Block->IsFree)
+            MemUsed += Block->size;
+        Index = Block->NextIndex;
+    }
+
+    NvOsMutexUnlock(pHeap->mutex);
+
+    return MemUsed;
+}
+
+/**
+ * @returns size in bytes of the largest free block in the heap (0 when
+ *          nothing is free).
+ */
+NvS32 NvRmPrivHeapSimpleLargestFreeBlock(NvRmHeapSimple* pHeap)
+{
+    // Fix: the index was declared NvS32 but list indices are NvU32 and are
+    // compared against INVALID_INDEX ((NvU32)-1); keep it unsigned to
+    // avoid the signed/unsigned comparison.
+    NvU32 Index;
+    NvS32 MaxFree = 0;
+
+    NV_ASSERT(pHeap != NULL);
+
+    NvOsMutexLock(pHeap->mutex);
+
+    for (Index = pHeap->BlockIndex; Index != INVALID_INDEX; )
+    {
+        NvRmHeapSimpleBlock* Block = &pHeap->RawBlockArray[Index];
+        int size = (int)Block->size;
+
+        if (Block->IsFree && size > MaxFree)
+            MaxFree = size;
+        Index = Block->NextIndex;
+    }
+
+    NvOsMutexUnlock(pHeap->mutex);
+    return MaxFree;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h
new file mode 100644
index 000000000000..588b87ad515b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_heap_simple.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_HEAP_SIMPLE_H
+#define NVRM_HEAP_SIMPLE_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+#include "nvos.h"
+
+
+// One block (allocated or free) in the simple heap.  Blocks form a
+// singly-linked list by array index (NextIndex) into RawBlockArray,
+// rather than by pointer.
+typedef struct NvRmHeapSimpleBlockRec NvRmHeapSimpleBlock;
+struct NvRmHeapSimpleBlockRec
+{
+ NvBool IsFree;
+ NvRmPhysAddr PhysAddr;
+ NvU32 size;
+
+ // Index of the next block in the list (array index, not a pointer).
+ NvU32 NextIndex;
+
+ // debug info
+ NvU32 touched;
+};
+
+
+// A simple first-fit physical-address allocator over a fixed region
+// [base, base + size).  Bookkeeping lives in RawBlockArray; BlockIndex
+// heads the in-use list and SpareBlockIndex heads the recycled-node list.
+// All public operations are serialized by 'mutex'.
+typedef struct NvRmHeapSimpleRec
+{
+ NvRmPhysAddr base;
+ NvU32 size;
+ NvU32 ArraySize;
+
+ NvRmHeapSimpleBlock *RawBlockArray;
+
+ NvU32 BlockIndex;
+ NvU32 SpareBlockIndex;
+
+ NvOsMutexHandle mutex;
+} NvRmHeapSimple;
+
+NvError NvRmPrivHeapSimple_HeapAlloc(NvRmPhysAddr Base, NvU32 Size, NvRmHeapSimple *pNewHeap);
+void NvRmPrivHeapSimple_HeapFree(NvRmHeapSimple *);
+
+NvError NvRmPrivHeapSimpleAlloc(NvRmHeapSimple *, NvU32 size, NvU32 align, NvRmPhysAddr *paddr);
+
+NvError NvRmPrivHeapSimplePreAlloc(
+ NvRmHeapSimple *,
+ NvRmPhysAddr Address,
+ NvU32 Length);
+
+void NvRmPrivHeapSimpleFree(NvRmHeapSimple *, NvRmPhysAddr paddr);
+
+NvS32 NvRmPrivHeapSimpleMemoryUsed(NvRmHeapSimple* pHeap);
+
+NvS32 NvRmPrivHeapSimpleLargestFreeBlock(NvRmHeapSimple* pHeap);
+
+#endif // NVRM_HEAP_SIMPLE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_hw_devids.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hw_devids.h
new file mode 100644
index 000000000000..9ce89e16b3b3
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hw_devids.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_HW_DEVIDS_H
+#define INCLUDED_NVRM_HW_DEVIDS_H
+
+// Memory Aperture: Internal Memory
+#define NVRM_DEVID_IMEM 1
+
+// Memory Aperture: External Memory
+#define NVRM_DEVID_EMEM 2
+
+// Memory Aperture: TCRAM
+#define NVRM_DEVID_TCRAM 3
+
+// Memory Aperture: IRAM
+#define NVRM_DEVID_IRAM 4
+
+// Memory Aperture: NOR FLASH
+#define NVRM_DEVID_NOR 5
+
+// Memory Aperture: EXIO
+#define NVRM_DEVID_EXIO 6
+
+// Memory Aperture: GART
+#define NVRM_DEVID_GART 7
+
+// Device Aperture: Graphics Host (HOST1X)
+#define NVRM_DEVID_HOST1X 8
+
+// Device Aperture: ARM PERIPH registers
+#define NVRM_DEVID_ARM_PERIPH 9
+
+// Device Aperture: MSELECT
+#define NVRM_DEVID_MSELECT 10
+
+// Device Aperture: memory controller
+#define NVRM_DEVID_MC 11
+
+// Device Aperture: external memory controller
+#define NVRM_DEVID_EMC 12
+
+// Device Aperture: video input
+#define NVRM_DEVID_VI 13
+
+// Device Aperture: encoder pre-processor
+#define NVRM_DEVID_EPP 14
+
+// Device Aperture: video encoder
+#define NVRM_DEVID_MPE 15
+
+// Device Aperture: 3D engine
+#define NVRM_DEVID_GR3D 16
+
+// Device Aperture: 2D + SBLT engine
+#define NVRM_DEVID_GR2D 17
+
+// Device Aperture: Image Signal Processor
+#define NVRM_DEVID_ISP 18
+
+// Device Aperture: DISPLAY
+#define NVRM_DEVID_DISPLAY 19
+
+// Device Aperture: UPTAG
+#define NVRM_DEVID_UPTAG 20
+
+// Device Aperture - SHR_SEM
+#define NVRM_DEVID_SHR_SEM 21
+
+// Device Aperture - ARB_SEM
+#define NVRM_DEVID_ARB_SEM 22
+
+// Device Aperture - ARB_PRI
+#define NVRM_DEVID_ARB_PRI 23
+
+// Obsoleted for AP15
+#define NVRM_DEVID_PRI_INTR 24
+
+// Obsoleted for AP15
+#define NVRM_DEVID_SEC_INTR 25
+
+// Device Aperture: Timer Programmable
+#define NVRM_DEVID_TMR 26
+
+// Device Aperture: Clock and Reset
+#define NVRM_DEVID_CAR 27
+
+// Device Aperture: Flow control
+#define NVRM_DEVID_FLOW 28
+
+// Device Aperture: Event
+#define NVRM_DEVID_EVENT 29
+
+// Device Aperture: AHB DMA
+#define NVRM_DEVID_AHB_DMA 30
+
+// Device Aperture: APB DMA
+#define NVRM_DEVID_APB_DMA 31
+
+// Obsolete - use AVP_CACHE
+#define NVRM_DEVID_CC 32
+
+// Device Aperture: COP Cache Controller
+#define NVRM_DEVID_AVP_CACHE 32
+
+// Device Aperture: SYS_REG
+#define NVRM_DEVID_SYS_REG 32
+
+// Device Aperture: System Statistic monitor
+#define NVRM_DEVID_STAT 33
+
+// Device Aperture: GPIO
+#define NVRM_DEVID_GPIO 34
+
+// Device Aperture: Vector Co-Processor 2
+#define NVRM_DEVID_VCP 35
+
+// Device Aperture: Arm Vectors
+#define NVRM_DEVID_VECTOR 36
+
+// Device: MEM
+#define NVRM_DEVID_MEM 37
+
+// Obsolete - use VDE
+#define NVRM_DEVID_SXE 38
+
+// Device Aperture: video decoder
+#define NVRM_DEVID_VDE 38
+
+// Obsolete - use VDE
+#define NVRM_DEVID_BSEV 39
+
+// Obsolete - use VDE
+#define NVRM_DEVID_MBE 40
+
+// Obsolete - use VDE
+#define NVRM_DEVID_PPE 41
+
+// Obsolete - use VDE
+#define NVRM_DEVID_MCE 42
+
+// Obsolete - use VDE
+#define NVRM_DEVID_TFE 43
+
+// Obsolete - use VDE
+#define NVRM_DEVID_PPB 44
+
+// Obsolete - use VDE
+#define NVRM_DEVID_VDMA 45
+
+// Obsolete - use VDE
+#define NVRM_DEVID_UCQ 46
+
+// Device Aperture: BSEA (now in AVP cluster)
+#define NVRM_DEVID_BSEA 47
+
+// Obsolete - use VDE
+#define NVRM_DEVID_FRAMEID 48
+
+// Device Aperture: Misc regs
+#define NVRM_DEVID_MISC 49
+
+// Obsolete
+#define NVRM_DEVID_AC97 50
+
+// Device Aperture: S/P-DIF
+#define NVRM_DEVID_SPDIF 51
+
+// Device Aperture: I2S
+#define NVRM_DEVID_I2S 52
+
+// Device Aperture: UART
+#define NVRM_DEVID_UART 53
+
+// Device Aperture: VFIR
+#define NVRM_DEVID_VFIR 54
+
+// Device Aperture: NAND Flash Controller
+#define NVRM_DEVID_NANDCTRL 55
+
+// Obsolete - use NANDCTRL
+#define NVRM_DEVID_NANDFLASH 55
+
+// Device Aperture: HSMMC
+#define NVRM_DEVID_HSMMC 56
+
+// Device Aperture: XIO
+#define NVRM_DEVID_XIO 57
+
+// Device Aperture: PWFM
+#define NVRM_DEVID_PWFM 58
+
+// Device Aperture: MIPI
+#define NVRM_DEVID_MIPI_HS 59
+
+// Device Aperture: I2C
+#define NVRM_DEVID_I2C 60
+
+// Device Aperture: TWC
+#define NVRM_DEVID_TWC 61
+
+// Device Aperture: SLINK
+#define NVRM_DEVID_SLINK 62
+
+// Device Aperture: SLINK4B
+#define NVRM_DEVID_SLINK4B 63
+
+// Device Aperture: SPI
+#define NVRM_DEVID_SPI 64
+
+// Device Aperture: DTV
+#define NVRM_DEVID_DTV 64
+
+// Device Aperture: DVC
+#define NVRM_DEVID_DVC 65
+
+// Device Aperture: RTC
+#define NVRM_DEVID_RTC 66
+
+// Device Aperture: KeyBoard Controller
+#define NVRM_DEVID_KBC 67
+
+// Device Aperture: PMIF
+#define NVRM_DEVID_PMIF 68
+
+// Device Aperture: FUSE
+#define NVRM_DEVID_FUSE 69
+
+// Device Aperture: L2 Cache Controller
+#define NVRM_DEVID_CMC 70
+
+// Device Aperture: NOR FLASH Controller
+#define NVRM_DEVID_NOR_REG 71
+
+// Device Aperture: EIDE
+#define NVRM_DEVID_EIDE 72
+
+// Device Aperture: USB
+#define NVRM_DEVID_USB 73
+
+// Device Aperture: SDIO
+#define NVRM_DEVID_SDIO 74
+
+// Device Aperture: TVO
+#define NVRM_DEVID_TVO 75
+
+// Device Aperture: DSI
+#define NVRM_DEVID_DSI 76
+
+// Device Aperture: HDMI
+#define NVRM_DEVID_HDMI 77
+
+// Device Aperture: Third Interrupt Controller extra registers
+#define NVRM_DEVID_TRI_INTR 78
+
+// Device Aperture: Common Interrupt Controller
+#define NVRM_DEVID_ICTLR 79
+
+// Non-Aperture Interrupt: DMA TX interrupts
+#define NVRM_DEVID_DMA_TX_INTR 80
+
+// Non-Aperture Interrupt: DMA RX interrupts
+#define NVRM_DEVID_DMA_RX_INTR 81
+
+// Non-Aperture Interrupt: SW reserved interrupt
+#define NVRM_DEVID_SW_INTR 82
+
+// Non-Aperture Interrupt: CPU PMU Interrupt
+#define NVRM_DEVID_CPU_INTR 83
+
+// Device Aperture: Timer Free Running MicroSecond
+#define NVRM_DEVID_TMRUS 84
+
+// Device Aperture: Interrupt Controller ARB_GNT Registers
+#define NVRM_DEVID_ICTLR_ARBGNT 85
+
+// Device Aperture: Interrupt Controller DMA Registers
+#define NVRM_DEVID_ICTLR_DRQ 86
+
+// Device Aperture: AHB DMA Channel
+#define NVRM_DEVID_AHB_DMA_CH 87
+
+// Device Aperture: APB DMA Channel
+#define NVRM_DEVID_APB_DMA_CH 88
+
+// Device Aperture: AHB Arbitration Controller
+#define NVRM_DEVID_AHB_ARBC 89
+
+// Obsolete - use AHB_ARBC
+#define NVRM_DEVID_AHB_ARB_CTRL 89
+
+// Device Aperture: AHB/APB Debug Bus Registers
+#define NVRM_DEVID_AHPBDEBUG 91
+
+// Device Aperture: Secure Boot Register
+#define NVRM_DEVID_SECURE_BOOT 92
+
+// Device Aperture: SPROM
+#define NVRM_DEVID_SPROM 93
+
+// Memory Aperture: AHB external memory remapping
+#define NVRM_DEVID_AHB_EMEM 94
+
+// Non-Aperture Interrupt: External PMU interrupt
+#define NVRM_DEVID_PMU_EXT 95
+
+// Device Aperture: AHB EMEM to MC Flush Register
+#define NVRM_DEVID_PPCS 96
+
+// Device Aperture: MMU TLB registers for COP/AVP
+#define NVRM_DEVID_MMU_TLB 97
+
+// Device Aperture: OVG engine
+#define NVRM_DEVID_VG 98
+
+// Device Aperture: CSI
+#define NVRM_DEVID_CSI 99
+
+// Device ID for COP
+#define NVRM_DEVID_AVP 100
+
+// Device ID for MPCORE
+#define NVRM_DEVID_CPU 101
+
+// Device Aperture: ULPI controller
+#define NVRM_DEVID_ULPI 102
+
+// Device Aperture: ARM CONFIG registers
+#define NVRM_DEVID_ARM_CONFIG 103
+
+// Device Aperture: ARM PL310 (L2 controller)
+#define NVRM_DEVID_ARM_PL310 104
+
+// Device Aperture: PCIe
+#define NVRM_DEVID_PCIE 105
+
+// Device Aperture: OWR (one wire)
+#define NVRM_DEVID_OWR 106
+
+// Device Aperture: AVPUCQ
+#define NVRM_DEVID_AVPUCQ 107
+
+// Device Aperture: AVPBSEA (obsolete)
+#define NVRM_DEVID_AVPBSEA 108
+
+// Device Aperture: Sync NOR
+#define NVRM_DEVID_SNOR 109
+
+// Device Aperture: SDMMC
+#define NVRM_DEVID_SDMMC 110
+
+// Device Aperture: KFUSE
+#define NVRM_DEVID_KFUSE 111
+
+// Device Aperture: CSITE
+#define NVRM_DEVID_CSITE 112
+
+// Non-Aperture Interrupt: ARM Interprocessor Interrupt
+#define NVRM_DEVID_ARM_IPI 113
+
+// Device Aperture: ARM Interrupts 0-31
+#define NVRM_DEVID_ARM_ICTLR 114
+
+// Device Aperture: IOBIST
+#define NVRM_DEVID_IOBIST 115
+
+// Device Aperture: SPEEDO
+#define NVRM_DEVID_SPEEDO 116
+
+// Device Aperture: LA
+#define NVRM_DEVID_LA 117
+
+// Device Aperture: VS
+#define NVRM_DEVID_VS 118
+
+// Device Aperture: VCI
+#define NVRM_DEVID_VCI 119
+
+// Device Aperture: APBIF
+#define NVRM_DEVID_APBIF 120
+
+// Device Aperture: AUDIO
+#define NVRM_DEVID_AUDIO 121
+
+// Device Aperture: DAM
+#define NVRM_DEVID_DAM 122
+
+// Device Aperture: TSENSOR
+#define NVRM_DEVID_TSENSOR 123
+
+// Device Aperture: SE
+#define NVRM_DEVID_SE 124
+
+// Device Aperture: TZRAM
+#define NVRM_DEVID_TZRAM 125
+
+// Device Aperture: AUDIO_CLUSTER
+#define NVRM_DEVID_AUDIO_CLUSTER 126
+
+// Device Aperture: HDA
+#define NVRM_DEVID_HDA 127
+
+// Device Aperture: SATA
+#define NVRM_DEVID_SATA 128
+
+// Device Aperture: ATOMICS
+#define NVRM_DEVID_ATOMICS 129
+
+// Device Aperture: IPATCH
+#define NVRM_DEVID_IPATCH 130
+
+// Device Aperture: Activity Monitor
+#define NVRM_DEVID_ACTMON 131
+
+// Device Aperture: Watch Dog Timer
+#define NVRM_DEVID_WDT 132
+
+#endif // INCLUDED_NVRM_HW_DEVIDS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.c
new file mode 100644
index 000000000000..a3d304d2e54a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#if NV_IS_AVP
+#define NV_DEF_RMC_TRACE 0 // NO TRACING FOR AVP
+#endif
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_module_private.h"
+#include "nvrm_rmctrace.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+
+// FIXME: This file needs to be split up, when we build user/kernel
+// The NvRegr/NvRegw should thunk to the kernel since the rm
+// handle is not usable in user space.
+//
+// NvRmPhysicalMemMap/NvRmPhysicalMemUnmap need to be in user space.
+//
+
+// Reads a 32-bit register.  'aperture'/'instance' select a module instance
+// from the RM module table; 'offset' is a byte offset from that instance's
+// mapped virtual base.  The offset is not range-checked here.
+NvU32 NvRegr( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 offset )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ // Asserts the aperture is actually present on this chip.
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+
+ return NV_READ32( addr );
+}
+
+// Writes a 32-bit register.  Mirror of NvRegr: resolves the module
+// instance's virtual base from the RM module table and writes 'data'
+// at byte offset 'offset'.
+void NvRegw( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 offset, NvU32 data )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+
+ NV_WRITE32( addr, data );
+}
+
+// Reads an 8-bit register at byte offset 'offset' within the selected
+// module instance's aperture.
+NvU8 NvRegr08( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 offset )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+
+ return NV_READ8( addr );
+}
+
+
+// Writes an 8-bit register at byte offset 'offset' within the selected
+// module instance's aperture.
+void NvRegw08( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 offset, NvU8 data )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+
+ NV_WRITE08( addr, data );
+}
+
+
+
+// Scattered multi-register read: for each of the 'num' entries in
+// 'offsets', reads the 32-bit register at that byte offset into the
+// corresponding slot of 'values'.
+void NvRegrm( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 num, const NvU32 *offsets, NvU32 *values )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+ NvU32 i;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+
+ for( i = 0; i < num; i++ )
+ {
+ addr = (void *)((NvUPtr)inst->VirtAddr + offsets[i]);
+
+ values[i] = NV_READ32( addr );
+ }
+}
+
+// Scattered multi-register write: for each of the 'num' entries, writes
+// values[i] to the 32-bit register at byte offset offsets[i].
+void NvRegwm( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 num, const NvU32 *offsets, const NvU32 *values )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+ NvU32 i;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+
+ for( i = 0; i < num; i++ )
+ {
+ addr = (void *)((NvUPtr)inst->VirtAddr + offsets[i]);
+
+ NV_WRITE32( addr, values[i] );
+ }
+}
+
+// Block register write: copies 'num' 32-bit words (num << 2 bytes) from
+// 'values' to the aperture starting at byte offset 'offset'.
+void NvRegwb( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 num, NvU32 offset, const NvU32 *values )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+ NV_WRITE( addr, values, (num << 2) );
+}
+
+// Block register read: copies 'num' 32-bit words (num << 2 bytes) from
+// the aperture starting at byte offset 'offset' into 'values'.
+void NvRegrb( NvRmDeviceHandle rm, NvRmModuleID aperture, NvU32 instance,
+ NvU32 num, NvU32 offset, NvU32 *values )
+{
+ void *addr;
+ NvRmModuleTable *tbl;
+ NvRmModuleInstance *inst;
+
+ tbl = NvRmPrivGetModuleTable( rm );
+
+ NV_ASSERT( tbl->Modules[aperture].Index != NVRM_MODULE_INVALID );
+
+ inst = tbl->ModInst + tbl->Modules[aperture].Index + instance;
+
+ addr = (void *)((NvUPtr)inst->VirtAddr + offset);
+ NV_READ( values, addr, (num << 2 ) );
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.h
new file mode 100644
index 000000000000..07017fb9b337
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_hwintf.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_HWINTF_H
+#define NVRM_HWINTF_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+#include "nvrm_module.h"
+#include "nvrm_module_private.h"
+#include "nvrm_hardware_access.h"
+
+#endif /* NVRM_HWINTF_H */
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_ioctls.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_ioctls.h
new file mode 100644
index 000000000000..3ca48914bed3
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_ioctls.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_IOCTLS_H
+#define NVRM_IOCTLS_H
+
+
+/* When we trap into the kernel, the majority of the ioctls
+ * are handled by the Generic handler, which is automatically
+ * generated by the IDL compiler.
+ *
+ * For some special functions, we override the generated code
+ * and supply custom marshalling/unmarshalling code for performance
+ * reasons. NvRmMemRead/Write are done this way to avoid having
+ * to allocate a buffer and do an extra copy.
+ *
+ * I'm sure as time passes we'll add more to the list here.
+ */
+
+// Ioctl command numbers understood by the NvRm kernel driver.  Values
+// start at 5000 and the order is ABI: do not reorder or insert entries
+// in the middle; append new commands before ForceWord only.
+typedef enum
+{
+ NvRmIoctls_Generic = 5000,
+ NvRmIoctls_NvRmMemRead,
+ NvRmIoctls_NvRmMemWrite,
+ NvRmIoctls_NvRmMemReadStrided,
+ NvRmIoctls_NvRmMemWriteStrided,
+ NvRmIoctls_NvRmMemMapIntoCallerPtr,
+ NvRmIoctls_NvRmGetCarveoutInfo,
+ NvRmIoctls_NvRmGraphics, // Note: not used in Linux (see nvidlcmd.h)
+ NvRmIoctls_NvRmFbControl,
+ NvRmIoctls_NvRmBootDone, // Called after primary boot-up complete
+
+ // These following ones are used for attaching to an existing NvRm
+ // context from another process - this is used for reference counting
+ // the kernel context when it is used both from a client process and
+ // the nvrm daemon in Linux. This mechanism is roughly equal to duplicating
+ // the nvrm driver filehandle across processes.
+ NvRmIoctls_NvRmGetClientId,
+ NvRmIoctls_NvRmClientAttach,
+ NvRmIoctls_NvRmClientDetach,
+
+ // Forces the enum to occupy a full 32 bits for a stable ABI size.
+ NvRmIoctls_ForceWord = 0x7FFFFFFF,
+} NvRmKernelIoctls;
+
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_keylist.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_keylist.c
new file mode 100644
index 000000000000..8471e134366d
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_keylist.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvutil.h"
+#include "nvrm_keylist.h"
+
+// Number of key/value pairs stored in each list node.
+#define NVRM_KEY_ARRAY_LEN 15
+
+// Key node structure: a chunked singly-linked list node holding up to
+// NVRM_KEY_ARRAY_LEN key/value pairs (Count of them are in use).
+typedef struct KeyRec
+{
+ NvU32 KeyID[NVRM_KEY_ARRAY_LEN];
+ NvU32 Value[NVRM_KEY_ARRAY_LEN];
+ NvU32 Count;
+ struct KeyRec *pNextKey;
+}Key;
+
+// Head of the key list.  s_InitialKeyList is statically allocated so the
+// first node never needs a heap allocation; s_pKeyList is NULL until
+// NvRmPrivInitKeyList runs.
+static Key s_InitialKeyList;
+static Key* s_pKeyList = NULL;
+
+// Handle to mutex for thread safety
+static NvOsMutexHandle s_Mutex = NULL;
+
+// Add a new key/value pair to the existing list (caller holds s_Mutex,
+// except during single-threaded init).
+static NvError AddKeyToList(NvU32 KeyID, NvU32 Value);
+
+// Frees every heap-allocated node of the Linked-List.
+static void FreeKeyList(void);
+
+void NvRmPrivDeInitKeyList(NvRmDeviceHandle hRm);
+
+NvError NvRmPrivInitKeyList(
+ NvRmDeviceHandle hRm,
+ const NvU32 * InitialValues,
+ NvU32 InitialCount);
+
+// Initializes the key list: creates the guarding mutex and seeds the
+// reserved key range with InitialCount values starting at
+// NvOdmKeyListId_ReservedAreaStart.
+// NOTE(review): if called twice, NvOsMutexCreate overwrites s_Mutex and
+// leaks the previous mutex -- confirm single-call-per-boot is guaranteed.
+NvError NvRmPrivInitKeyList(NvRmDeviceHandle hRm,
+ const NvU32 *InitialValues,
+ NvU32 InitialCount)
+{
+ NvError Error;
+ NvU32 i;
+
+ Error = NvOsMutexCreate(&s_Mutex);
+ if (Error!=NvSuccess)
+ return Error;
+
+ if (!s_pKeyList)
+ {
+ // First init: use the static node as the list head.
+ s_pKeyList = &s_InitialKeyList;
+ s_InitialKeyList.Count = 0;
+ s_InitialKeyList.pNextKey = NULL;
+ for (i=0; i<InitialCount; i++)
+ {
+ // NOTE(review): AddKeyToList's NvError return (possible OOM once
+ // the static node is full) is silently ignored here.
+ AddKeyToList(NvOdmKeyListId_ReservedAreaStart + i,
+ InitialValues[i]);
+ }
+ }
+
+ // Error is NvSuccess at this point.
+ return Error;
+}
+
+// Tears down the key list: frees all heap-allocated nodes, resets the
+// list head, and destroys the mutex.
+// NOTE(review): s_Mutex is left dangling (not NULLed) after destroy;
+// calling any keylist API after DeInit would use a destroyed mutex.
+void NvRmPrivDeInitKeyList(NvRmDeviceHandle hRm)
+{
+ NvOsMutexLock(s_Mutex);
+ FreeKeyList();
+ s_pKeyList = NULL;
+ NvOsMutexUnlock(s_Mutex);
+ NvOsMutexDestroy(s_Mutex);
+}
+
+// Looks up KeyID in the list and returns its value, or 0 if the key is
+// not present.  Note a stored value of 0 is indistinguishable from
+// "not found" for callers of this API.
+NvU32 NvRmGetKeyValue(NvRmDeviceHandle hRm, NvU32 KeyID)
+{
+ Key *pList = s_pKeyList;
+ NvU32 Value = 0;
+ unsigned int i;
+
+ NvOsMutexLock(s_Mutex);
+ // Scan every node's in-use entries for a matching key.
+ while (pList)
+ {
+ for (i=0; i<pList->Count; i++)
+ {
+ if (pList->KeyID[i] == KeyID)
+ {
+ Value = pList->Value[i];
+ goto cleanup;
+ }
+ }
+ pList = pList->pNextKey;
+ }
+cleanup:
+ NvOsMutexUnlock(s_Mutex);
+ // Returning value as 0 since key is not present
+ return Value;
+}
+
+// Sets KeyID to Value, updating the entry in place if the key already
+// exists, otherwise appending it.  Keys in the ODM reserved range
+// [ReservedAreaStart, ReservedAreaEnd] may only be seeded at init time
+// and are rejected here with NvError_NotSupported.
+NvError NvRmSetKeyValuePair(NvRmDeviceHandle hRm, NvU32 KeyID, NvU32 Value)
+{
+ Key *pList = s_pKeyList;
+ NvError e = NvSuccess;
+ unsigned int i;
+
+ if (KeyID >= NvOdmKeyListId_ReservedAreaStart &&
+ KeyID <= NvOdmKeyListId_ReservedAreaEnd)
+ return NvError_NotSupported;
+
+ NvOsMutexLock(s_Mutex);
+ // Checking if key already exists
+ while (pList)
+ {
+ for (i=0; i<pList->Count; i++)
+ {
+ if (pList->KeyID[i] == KeyID)
+ {
+ pList->Value[i] = Value;
+ goto cleanup;
+ }
+ }
+ pList = pList->pNextKey;
+ }
+ // Adding The new key to the list
+ e = AddKeyToList(KeyID, Value);
+cleanup:
+ NvOsMutexUnlock(s_Mutex);
+ return e;
+}
+
+
+// Appends a key/value pair: fills the head node until its array is full,
+// then allocates a fresh node and prepends it as the new head.
+// Returns NvError_InsufficientMemory if the node allocation fails.
+// Linkage note: declared 'static' above, so per C rules this definition
+// keeps internal linkage despite omitting the keyword here.
+NvError AddKeyToList(NvU32 KeyID, NvU32 Value)
+{
+ Key *pList;
+
+ if (s_pKeyList->Count < NVRM_KEY_ARRAY_LEN)
+ {
+ s_pKeyList->KeyID[s_pKeyList->Count] = KeyID;
+ s_pKeyList->Value[s_pKeyList->Count] = Value;
+ s_pKeyList->Count++;
+ }
+ else
+ {
+ // Head node is full: start a new node at the front of the list.
+ pList = NvOsAlloc(sizeof(Key));
+
+ if (pList == NULL)
+ return NvError_InsufficientMemory;
+
+ pList->KeyID[0] = KeyID;
+ pList->Value[0] = Value;
+ pList->Count = 1;
+ pList->pNextKey = s_pKeyList;
+ s_pKeyList = pList;
+ }
+
+ return NvSuccess;
+}
+
+// Frees every heap-allocated node, walking from the head until only the
+// statically-allocated s_InitialKeyList node remains (that node is never
+// freed; the caller resets s_pKeyList afterwards).
+// NOTE(review): if the list was never initialized (s_pKeyList == NULL)
+// this dereferences NULL -- presumably DeInit is only called after a
+// successful Init; confirm against callers.
+void FreeKeyList(void)
+{
+ Key *pTemp = s_pKeyList;
+ while (s_pKeyList != &s_InitialKeyList)
+ {
+ pTemp = s_pKeyList->pNextKey ;
+ NvOsFree(s_pKeyList);
+ s_pKeyList = pTemp;
+ }
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c
new file mode 100644
index 000000000000..b8ade276482b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr.c
@@ -0,0 +1,1212 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_memmgr.h"
+#include "nvrm_memmgr_private.h"
+#include "nvrm_heap_simple.h"
+#include "ap15/ap15rm_private.h"
+#include "nvos.h"
+#include "nvbootargs.h"
+#include "nvrm_chiplib.h"
+
+/* FIXME: temporary hack to force all Linux allocations to be page-aligned */
+#if NVOS_IS_LINUX
+#define NVRM_ALLOC_MIN_ALIGN 4096
+#else
+#define NVRM_ALLOC_MIN_ALIGN 4
+#endif
+
+// Set to 1 to assert that handles are pinned before their address is read.
+#define NVRM_CHECK_PIN 0
+
+// Magic value stamped into NvRmMem::magic on creation, cleared on free.
+#define NVRM_MEM_MAGIC 0xdead9812
+/* Debug-only sanity check on a memory handle: asserts bit 0 is clear
+ * (bit 0 tags "id" values, see NvRmMemGetId), strips it if set, and
+ * verifies the magic field. Compiles to nothing when
+ * NVRM_HMEM_CHECK_MAGIC is 0. May rewrite its argument. */
+#define NVRM_HMEM_CHECK(hMem) \
+ do { \
+ if (NVRM_HMEM_CHECK_MAGIC) { \
+ NV_ASSERT(((NvU32)(hMem)&1)==0); \
+ if (((NvU32)(hMem)&1)) { \
+ (hMem) = idtomem(hMem); \
+ } \
+ NV_ASSERT((hMem)->magic == NVRM_MEM_MAGIC); \
+ } \
+ } while(0)
+
+/* Convert a tagged id (pointer with bit 0 set) back to a handle pointer,
+ * logging the conversion. */
+static NvRmMemHandle idtomem(NvRmMemHandle hMem)
+{
+ NvOsDebugPrintf("RMMEM id->mem %08x\n",(int)hMem);
+ return (NvRmMemHandle)((NvU32)hMem&~1UL);
+}
+
+#if NVRM_MEM_TRACE
+#undef NvRmMemHandleCreate
+#undef NvRmMemHandleFree
+#undef NvRmMemGetId
+#undef NvRmMemHandleFromId
+#endif
+
+
+/* GART related */
+NvBool gs_GartInited = NV_FALSE;
+NvRmHeapSimple gs_GartAllocator;
+NvU32 *gs_GartSave = NULL;
+static NvRmPrivHeap gs_GartHeap;
+static NvUPtr gs_GartBaseAddr;
+
+// Chip-specific GART entry points, selected in NvRmPrivHeapGartInit()
+// based on the memory controller capability version (AP15 vs AP20).
+static NvError (*s_HeapGartAlloc)( NvRmDeviceHandle hDevice,
+ NvOsPageAllocHandle hPageHandle,
+ NvU32 NumberOfPages, NvRmPhysAddr *PAddr);
+static void (*s_HeapGartFree)( NvRmDeviceHandle hDevice,
+ NvRmPhysAddr addr, NvU32 NumberOfPages);
+static void (*s_GartSuspend)( NvRmDeviceHandle hDevice ) = NULL;
+static void (*s_GartResume)( NvRmDeviceHandle hDevice ) = NULL;
+
+
+// Next boot-arg key to hand out for preserved handles, and the handles
+// reconstructed from boot args at init (claimed by key, then cleared).
+static NvU32 gs_NextPreservedMemHandleKey;
+static NvRmMemHandle gs_PreservedHandles[NV_BOOTARGS_MAX_PRESERVED_MEMHANDLES];
+
+/*
+ * Notes:
+ *
+ * 1) The allocation of the handles should fall back to a block allocator
+ * that allocates say 1024 at a time to reduce heap fragmentation.
+ *
+ */
+
+/* Allocate and initialize an unbacked memory handle of the given size.
+ * No physical memory is reserved here; NvRmMemAlloc() attaches storage
+ * later. On success *phMem holds the new handle with refcount 1.
+ * Returns NvError_InsufficientMemory if the handle struct cannot be
+ * allocated. */
+NvError NvRmMemHandleCreate(
+ NvRmDeviceHandle hRmDevice,
+ NvRmMemHandle *phMem,
+ NvU32 size)
+{
+ NvRmMemHandle pNewHandle = NULL;
+ NvError err = NvSuccess;
+
+#if NVCPU_IS_X86
+ // Over-allocate and round up so the handle is 4-byte aligned; bit 0
+ // of the pointer must be clear because it tags id values.
+ // NOTE(review): the rounded pointer is the one later passed to
+ // NvOsFree -- assumes NvOsAlloc results are already 4-aligned; verify.
+ pNewHandle = NvOsAlloc(sizeof(*pNewHandle)+4);
+ pNewHandle = (NvRmMemHandle)(((NvU32)pNewHandle+3)&~3UL);
+#else
+ pNewHandle = NvOsAlloc(sizeof(*pNewHandle));
+#endif
+ if (!pNewHandle)
+ {
+ err = NvError_InsufficientMemory;
+ goto exit_gracefully;
+ }
+
+ NV_ASSERT(((NvU32)pNewHandle & 1) == 0);
+
+ NvOsMemset(pNewHandle, 0, sizeof(*pNewHandle));
+ pNewHandle->size = size;
+ pNewHandle->hRmDevice = hRmDevice;
+ pNewHandle->PhysicalAddress = NV_RM_INVALID_PHYS_ADDRESS;
+ pNewHandle->VirtualAddress = NULL;
+ pNewHandle->refcount = 1;
+ pNewHandle->pin_count = 0;
+ pNewHandle->coherency = NvOsMemAttribute_Uncached;
+#if NVRM_HMEM_CHECK_MAGIC
+ pNewHandle->magic = NVRM_MEM_MAGIC;
+#endif
+
+ *phMem = pNewHandle;
+
+exit_gracefully:
+ if (err != NvSuccess)
+ NvOsFree(pNewHandle);
+
+ return err;
+}
+
+/* Atomically take an additional reference on hMem. Paired with the
+ * decrement in NvRmMemHandleFree(). */
+void NvRmPrivMemIncrRef(NvRmMemHandle hMem)
+{
+ NV_ASSERT(hMem);
+ NvOsAtomicExchangeAdd32(&hMem->refcount, 1);
+}
+
+/* Release the backing storage (and any heap-owned mapping) of hMem,
+ * returning the handle to the "created but unallocated" state. The
+ * handle struct itself is NOT freed here -- see NvRmMemHandleFree().
+ * No-op when hMem is NULL or has no storage attached. */
+static void NvRmPrivMemFree(NvRmMemHandle hMem)
+{
+ if (!hMem)
+ return;
+
+ NVRM_HMEM_CHECK(hMem);
+
+ if (!NV_RM_HMEM_IS_ALLOCATED(hMem))
+ return;
+
+ // On x86 (non-simulation) the "storage" is a plain heap buffer.
+ if(NVCPU_IS_X86 && !NvRmIsSimulation())
+ {
+ NvOsFree(hMem->VirtualAddress);
+ hMem->VirtualAddress = NULL;
+ }
+
+ switch (hMem->heap)
+ {
+ case NvRmHeap_ExternalCarveOut:
+ NvRmPrivHeapCarveoutFree(hMem->PhysicalAddress);
+ break;
+ case NvRmHeap_IRam:
+ NvRmPrivHeapIramFree(hMem->PhysicalAddress);
+ break;
+ case NvRmHeap_GART:
+ // Unmap first, then release GART pages and the OS pages backing them.
+ NvRmPhysicalMemUnmap(hMem->VirtualAddress, hMem->size);
+ (*s_HeapGartFree)(hMem->hRmDevice, hMem->PhysicalAddress,
+ hMem->Pages);
+ NvOsPageFree(hMem->hPageHandle);
+ break;
+ case NvRmHeap_External:
+ NvOsPageUnmap(hMem->hPageHandle, hMem->VirtualAddress, hMem->size);
+ NvOsPageFree(hMem->hPageHandle);
+ break;
+ default:
+ break;
+ }
+
+ hMem->PhysicalAddress = NV_RM_INVALID_PHYS_ADDRESS;
+ hMem->VirtualAddress = NULL;
+ hMem->heap = 0;
+#if NVRM_HMEM_CHECK_MAGIC
+ hMem->magic = 0;
+#endif
+}
+
+/* Drop one reference on hMem; when the last reference goes away, free
+ * the backing storage and the handle struct under the device's memory
+ * manager mutex. Safe to call with NULL. */
+void NvRmMemHandleFree(NvRmMemHandle hMem)
+{
+ NvS32 old;
+ NvOsMutexHandle mutex;
+
+ if( !hMem )
+ {
+ return;
+ }
+
+ NVRM_HMEM_CHECK(hMem);
+ old = NvOsAtomicExchangeAdd32(&hMem->refcount, -1);
+ if(old > 1)
+ {
+ // Other references remain; nothing more to do.
+ return;
+ }
+
+ NV_ASSERT(old != 0);
+
+ // Take the mutex before touching shared heap state; grab it from the
+ // device now since hMem is about to be destroyed.
+ mutex = hMem->hRmDevice->MemMgrMutex;
+ NvOsMutexLock(mutex);
+
+ NvRmPrivMemFree(hMem);
+ // A still-mapped handle at free time is a caller bug (asserted in
+ // debug); the release build attempts a best-effort unmap.
+ // NOTE(review): NvRmPrivMemFree() has already cleared VirtualAddress
+ // by this point, so the fallback unmap looks ineffective -- confirm.
+ NV_ASSERT(hMem->mapped == NV_FALSE);
+ if (hMem->mapped == NV_TRUE)
+ {
+ NvRmMemUnmap(hMem, hMem->VirtualAddress, hMem->size);
+ }
+
+#if NVRM_HMEM_CHECK_MAGIC
+ hMem->magic = 0;
+#endif
+
+ NvOsFree(hMem);
+
+ NvOsMutexUnlock( mutex );
+}
+
+#define ERRATA_398959(ChipId) \
+ ((ChipId).Id == 0x15 && (ChipId).Major == 1 && (ChipId).Minor == 1)
+
+
+/* Attach physical storage to a previously created handle. Tries each
+ * heap in Heaps[] in order until one succeeds; when Heaps is NULL/empty
+ * a default list is used (External + Carveout, plus GART on chips not
+ * affected by errata 398959). Small allocations (<= one page) that are
+ * not explicitly IRAM-targeted prefer External then Carveout.
+ * Returns NvError_AlreadyAllocated if storage is already attached, or
+ * NvError_InsufficientMemory when every heap fails. On success a CPU
+ * mapping is cached on the handle for non-writeback coherency. */
+NvError NvRmMemAlloc(
+ NvRmMemHandle hMem,
+ const NvRmHeap *Heaps,
+ NvU32 NumHeaps,
+ NvU32 Alignment,
+ NvOsMemAttribute Coherency)
+{
+ // Default heap list does not include GART due to AP15 hardware bug. GART
+ // will be re-added to default heap list on AP20 and beyond.
+ NvRmHeap DefaultHeaps[3];
+ NvU32 i;
+ NvError err;
+
+
+ NV_ASSERT(hMem && (!NumHeaps || Heaps));
+ NVRM_HMEM_CHECK(hMem);
+
+ /* FIXME: Windows should support full caching for memory handles.
+ * But not yet.
+ */
+#if !NVOS_IS_LINUX
+ Coherency = NvOsMemAttribute_Uncached;
+#endif
+
+ if (NV_RM_HMEM_IS_ALLOCATED(hMem))
+ return NvError_AlreadyAllocated;
+
+ // x86 (non-simulation): back the handle with plain heap memory.
+ if(NVCPU_IS_X86 && !NvRmIsSimulation())
+ {
+ hMem->VirtualAddress = NvOsAlloc(hMem->size);
+ if(hMem->VirtualAddress)
+ {
+ if (Heaps)
+ {
+ hMem->heap = Heaps[0];
+ }
+
+ return NvSuccess;
+ }
+ return NvError_InsufficientMemory;
+ }
+
+ NvOsMutexLock(hMem->hRmDevice->MemMgrMutex);
+
+ if (hMem->size <= NVCPU_MIN_PAGE_SIZE &&
+ (!NumHeaps || Heaps[0] != NvRmHeap_IRam))
+ {
+ DefaultHeaps[0] = NvRmHeap_External;
+ DefaultHeaps[1] = NvRmHeap_ExternalCarveOut;
+ Heaps = DefaultHeaps;
+ NumHeaps = 2;
+ }
+ else if (!NumHeaps)
+ {
+ DefaultHeaps[0] = NvRmHeap_ExternalCarveOut;
+ DefaultHeaps[1] = NvRmHeap_External;
+ NumHeaps = 2;
+ if (!ERRATA_398959(hMem->hRmDevice->ChipId))
+ DefaultHeaps[NumHeaps++] = NvRmHeap_GART;
+ Heaps = DefaultHeaps;
+ }
+
+ // Clamp to the platform minimum (NVRM_ALLOC_MIN_ALIGN: page size on
+ // Linux, 4 bytes elsewhere).
+ if (Alignment < NVRM_ALLOC_MIN_ALIGN)
+ Alignment = NVRM_ALLOC_MIN_ALIGN;
+
+ // Try each heap until one succeeds; on success `i` indexes the
+ // winning heap for the bookkeeping below.
+ for (i=0, err=NvError_InsufficientMemory;
+ i<NumHeaps && err!=NvSuccess; i++)
+ {
+ // Page-based heaps cannot honor alignment beyond the page size.
+ if (Alignment > NVCPU_MIN_PAGE_SIZE &&
+ (Heaps[i]==NvRmHeap_External || Heaps[i]==NvRmHeap_GART))
+ {
+ NV_ASSERT(!"Invalid alignment request to GART / External heap");
+ continue;
+ }
+
+ switch (Heaps[i])
+ {
+ case NvRmHeap_ExternalCarveOut:
+ err = NvRmPrivHeapCarveoutAlloc(hMem->size,
+ Alignment, &hMem->PhysicalAddress);
+ break;
+ case NvRmHeap_IRam:
+ err = NvRmPrivHeapIramAlloc(hMem->size,
+ Alignment, &hMem->PhysicalAddress);
+ break;
+ case NvRmHeap_External:
+ err = NvOsPageAlloc(hMem->size, Coherency,
+ NvOsPageFlags_Contiguous, NVOS_MEM_READ_WRITE,
+ &hMem->hPageHandle);
+ break;
+ case NvRmHeap_GART:
+ // Allocate scatter pages, then map them through the GART;
+ // roll the page allocation back if the GART map fails.
+ err = NvOsPageAlloc(hMem->size, Coherency,
+ NvOsPageFlags_NonContiguous, NVOS_MEM_READ_WRITE,
+ &hMem->hPageHandle);
+
+ if (err != NvSuccess)
+ break;
+
+ hMem->Pages = (hMem->size+(GART_PAGE_SIZE-1))/GART_PAGE_SIZE;
+
+ err = (*s_HeapGartAlloc)(hMem->hRmDevice,
+ hMem->hPageHandle, hMem->Pages, &hMem->PhysicalAddress);
+
+ if (err == NvSuccess)
+ break;
+
+ hMem->Pages = 0;
+ NvOsPageFree(hMem->hPageHandle);
+ hMem->hPageHandle = NULL;
+ break;
+
+ default:
+ NV_ASSERT(!"Invalid heap in heaps array");
+ }
+
+ if (err==NvSuccess)
+ break;
+ }
+
+ NvOsMutexUnlock(hMem->hRmDevice->MemMgrMutex);
+
+ if (err == NvSuccess)
+ {
+ hMem->alignment = Alignment;
+ hMem->heap = Heaps[i];
+ hMem->coherency = Coherency;
+
+ /* Don't cache virtual mappings for cacheable handles in the RM,
+ * since there isn't a good way to ensure proper coherency */
+ if (Coherency != NvOsMemAttribute_WriteBack)
+ {
+ NvRmMemMap(hMem, 0, hMem->size, NVOS_MEM_READ_WRITE,
+ &hMem->VirtualAddress);
+ }
+ }
+
+ return err;
+}
+
+/* Increment the handle's pin count and return its device-visible
+ * address. Pinning is only bookkeeping here (see FIXME); the storage
+ * is never relocated by this implementation. Returns 0xFFFFFFFF on
+ * x86 non-simulation builds and for unknown heaps. */
+NvU32 NvRmMemPin(NvRmMemHandle hMem)
+{
+ NvS32 old;
+
+ NV_ASSERT(hMem);
+ NVRM_HMEM_CHECK(hMem);
+
+ old = NvOsAtomicExchangeAdd32(&hMem->pin_count, 1);
+
+ // Catch wrap-around from a massively unbalanced unpin.
+ NV_ASSERT(old != -1);
+
+ // FIXME: finish implementation
+
+ if (NVCPU_IS_X86 && !NvRmIsSimulation())
+ return 0xFFFFFFFF;
+
+ switch (hMem->heap)
+ {
+ case NvRmHeap_External:
+ return (NvU32)NvOsPageAddress(hMem->hPageHandle, 0);
+ case NvRmHeap_ExternalCarveOut:
+ case NvRmHeap_GART:
+ case NvRmHeap_IRam:
+ return hMem->PhysicalAddress;
+ default:
+ NV_ASSERT(!"Unknown heap");
+ return 0xFFFFFFFF;
+ }
+}
+
+/* Pin each handle in hMems[] and store its address into the parallel
+ * Addrs[] array. */
+void NvRmMemPinMult(NvRmMemHandle *hMems, NvU32 *Addrs, NvU32 Count)
+{
+ NvU32 i;
+ for( i = 0; i < Count; i++ )
+ {
+ Addrs[i] = NvRmMemPin( hMems[i] );
+ }
+}
+
+/* Decrement the handle's pin count (bookkeeping only). Safe with NULL.
+ * Asserts against unpinning a handle that was never pinned. */
+void NvRmMemUnpin(NvRmMemHandle hMem)
+{
+ NvS32 old;
+
+ if( !hMem )
+ {
+ return;
+ }
+
+ NVRM_HMEM_CHECK(hMem);
+
+ old = NvOsAtomicExchangeAdd32(&hMem->pin_count, -1);
+ NV_ASSERT(old != 0);
+}
+
+/* Unpin each handle in hMems[]. */
+void NvRmMemUnpinMult(NvRmMemHandle *hMems, NvU32 Count)
+{
+ NvU32 i;
+ for(i = 0; i < Count; i++)
+ {
+ NvRmMemUnpin(hMems[i]);
+ }
+}
+
+/* Return the device-visible address of (hMem + Offset), or (NvU32)-1 on
+ * x86 non-simulation builds and for unknown heaps. With NVRM_CHECK_PIN
+ * enabled, asserts the handle is pinned first. */
+NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
+{
+ NV_ASSERT(hMem != NULL);
+ NV_ASSERT(Offset < hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+
+#if NVRM_CHECK_PIN
+ NV_ASSERT( hMem->pin_count );
+#endif
+
+ if(NVCPU_IS_X86 && !NvRmIsSimulation())
+ {
+ return (NvU32)-1;
+ }
+
+ switch (hMem->heap)
+ {
+ case NvRmHeap_External:
+ return (NvU32)NvOsPageAddress(hMem->hPageHandle, Offset);
+
+ case NvRmHeap_ExternalCarveOut:
+ case NvRmHeap_GART:
+ case NvRmHeap_IRam:
+ return (hMem->PhysicalAddress + Offset);
+
+ default:
+ NV_ASSERT(!"Unknown heap");
+ break;
+ }
+
+ return (NvU32)-1;
+}
+
+/* Attempt to use the pre-mapped carveout or iram aperture on Windows CE */
+#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
+/* Return a pointer into the globally pre-mapped carveout/IRAM aperture
+ * for [base, base+len), or NULL when no such aperture applies (writeback
+ * coherency, or other heaps). On Linux/ARM this compiles to NULL. */
+static void *NvRmMemMapGlobalHeap(
+ NvRmPhysAddr base,
+ NvU32 len,
+ NvRmHeap heap,
+ NvOsMemAttribute coherency)
+{
+ if (coherency == NvOsMemAttribute_WriteBack)
+ return NULL;
+
+ if (heap == NvRmHeap_ExternalCarveOut)
+ return NvRmPrivHeapCarveoutMemMap(base, len, coherency);
+ else if (heap == NvRmHeap_IRam)
+ return NvRmPrivHeapIramMemMap(base, len, coherency);
+
+ return NULL;
+}
+#else
+#define NvRmMemMapGlobalHeap(base,len,heap,coherency) NULL
+#endif
+
+
+
+/* Map [Offset, Offset+Size) of hMem into the caller's address space.
+ * Reuses the handle's cached mapping (or a global heap aperture) when
+ * available, otherwise creates a fresh OS mapping. In simulation no
+ * mapping is possible and NvError_InsufficientMemory is returned.
+ * Flags are NVOS_MEM_* access bits. */
+NvError NvRmMemMap(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 Size,
+ NvU32 Flags,
+ void **pVirtAddr)
+{
+ NV_ASSERT(Offset + Size <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+
+ if (!hMem->VirtualAddress)
+ hMem->VirtualAddress = NvRmMemMapGlobalHeap(
+ hMem->PhysicalAddress+Offset, Size, hMem->heap, hMem->coherency);
+
+ if (NvRmIsSimulation())
+ return NvError_InsufficientMemory;
+
+ // Cached mapping covers the whole handle; hand out an offset into it.
+ if (hMem->VirtualAddress)
+ {
+ *pVirtAddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ return NvSuccess;
+ }
+
+ switch (hMem->heap)
+ {
+ case NvRmHeap_ExternalCarveOut:
+ case NvRmHeap_IRam:
+#if !(NVOS_IS_LINUX && !NVCPU_IS_X86)
+ case NvRmHeap_GART:
+#endif
+ return NvOsPhysicalMemMap(hMem->PhysicalAddress + Offset,
+ Size, hMem->coherency, Flags, pVirtAddr);
+ case NvRmHeap_External:
+ return NvOsPageMap(hMem->hPageHandle, Offset, Size, pVirtAddr);
+ default:
+ *pVirtAddr = NULL;
+ return NvError_NotSupported;
+ }
+}
+
+
+/* Release a mapping obtained from NvRmMemMap. Pointers that fall inside
+ * the handle's cached whole-handle mapping are NOT unmapped here -- that
+ * mapping is torn down when the storage is freed. Safe with NULL/0
+ * arguments; no-op in simulation and on x86 (no mappings were made). */
+void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 length)
+{
+ if (!hMem || !pVirtAddr || !length)
+ {
+ return;
+ }
+
+ NVRM_HMEM_CHECK(hMem);
+
+ // No mappings ever get created in these cases
+ if (NvRmIsSimulation() || NVCPU_IS_X86)
+ return;
+
+ // Pointer lies within the cached mapping: keep it alive.
+ if (hMem->VirtualAddress <= pVirtAddr &&
+ ((NvU8*)hMem->VirtualAddress + hMem->size) >= (NvU8*)pVirtAddr)
+ return;
+
+
+ switch (hMem->heap)
+ {
+ case NvRmHeap_External:
+ NvOsPageUnmap(hMem->hPageHandle, pVirtAddr, length);
+ break;
+ case NvRmHeap_ExternalCarveOut:
+ case NvRmHeap_IRam:
+#if NVOS_IS_WINDOWS
+ case NvRmHeap_GART:
+#endif
+ NvOsPhysicalMemUnmap(pVirtAddr, length);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Read one byte at Offset through the handle's cached CPU mapping.
+ * Returns 0 (after asserting in debug) when the handle is unmapped. */
+NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return 0;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 1 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ return NV_READ8(vaddr);
+}
+
+/* Read a 16-bit value at Offset through the cached CPU mapping.
+ * Returns 0 (after asserting in debug) when the handle is unmapped. */
+NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return 0;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 2 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ return NV_READ16(vaddr);
+}
+
+/* Read a 32-bit value at Offset through the cached CPU mapping.
+ * Returns 0 (after asserting in debug) when the handle is unmapped. */
+NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return 0;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 4 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ return NV_READ32(vaddr);
+}
+
+/* Write one byte at Offset through the cached CPU mapping; traced via
+ * RMC when tracing is enabled. Silently ignored (after a debug assert)
+ * when the handle is unmapped. */
+void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 1 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite8 0x%x 0x%x\n",
+ hMem->PhysicalAddress + Offset, Data));
+ NV_WRITE08(vaddr, Data);
+}
+
+/* Write a 16-bit value at Offset through the cached CPU mapping; traced
+ * via RMC when tracing is enabled. Silently ignored (after a debug
+ * assert) when the handle is unmapped. */
+void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 2 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite16 0x%x 0x%x\n",
+ hMem->PhysicalAddress + Offset, Data));
+ NV_WRITE16(vaddr, Data);
+}
+
+/* Write a 32-bit value at Offset through the cached CPU mapping; traced
+ * via RMC when tracing is enabled. Silently ignored (after a debug
+ * assert) when the handle is unmapped. */
+void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data)
+{
+ void *vaddr;
+
+ NV_ASSERT(hMem->VirtualAddress != NULL);
+ if (!hMem->VirtualAddress)
+ return;
+
+ vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + 4 <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite32 0x%x 0x%x\n",
+ hMem->PhysicalAddress + Offset, Data));
+ NV_WRITE32(vaddr, Data);
+}
+
+/* Bulk-copy Size bytes from (hMem + Offset) into pDst.
+ * NOTE(review): unlike NvRmMemRd08/16/32 there is no guard against a
+ * NULL cached mapping here -- callers must ensure the handle is mapped. */
+void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size)
+{
+ void *vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+ NV_ASSERT(Offset + Size <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+ NV_READ(pDst, vaddr, Size);
+}
+
+/* Bulk-copy Size bytes from pSrc into (hMem + Offset). When RMC tracing
+ * is compiled in, each byte is logged individually before the copy.
+ * NOTE(review): no guard against a NULL cached mapping -- callers must
+ * ensure the handle is mapped. */
+void NvRmMemWrite(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ const void *pSrc,
+ NvU32 Size)
+{
+ void *vaddr = (NvU8 *)hMem->VirtualAddress + Offset;
+#if NV_DEF_RMC_TRACE
+ NvU32 i;
+#endif
+
+ NV_ASSERT(Offset + Size <= hMem->size);
+ NVRM_HMEM_CHECK(hMem);
+
+#if NV_DEF_RMC_TRACE
+ for (i = 0; i < Size; i++)
+ {
+ NvU8 Data = ((const NvU8 *)pSrc)[i];
+ NVRM_RMC_TRACE((&hMem->hRmDevice->rmc, "MemoryWrite8 0x%x 0x%x\n",
+ hMem->PhysicalAddress + i, Data));
+ }
+#endif
+
+ NV_WRITE(vaddr, pSrc, Size);
+}
+
+/* Read Count elements of ElementSize bytes from hMem into pDst, advancing
+ * the source by SrcStride and the destination by DstStride per element.
+ * Degenerates to a single contiguous read when both strides equal the
+ * element size. */
+void NvRmMemReadStrided(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 SrcStride,
+ void *pDst,
+ NvU32 DstStride,
+ NvU32 ElementSize,
+ NvU32 Count)
+{
+ if ((ElementSize == SrcStride) && (ElementSize == DstStride))
+ {
+ NvRmMemRead(hMem, Offset, pDst, ElementSize * Count);
+ }
+ else
+ {
+ while (Count--)
+ {
+ NvRmMemRead(hMem, Offset, pDst, ElementSize);
+ Offset += SrcStride;
+ pDst = (NvU8 *)pDst + DstStride;
+ }
+ }
+}
+
+/* Write Count elements of ElementSize bytes from pSrc into hMem, advancing
+ * the destination by DstStride and the source by SrcStride per element.
+ * Degenerates to a single contiguous write when both strides equal the
+ * element size. */
+void NvRmMemWriteStrided(
+ NvRmMemHandle hMem,
+ NvU32 Offset,
+ NvU32 DstStride,
+ const void *pSrc,
+ NvU32 SrcStride,
+ NvU32 ElementSize,
+ NvU32 Count)
+{
+ if ((ElementSize == SrcStride) && (ElementSize == DstStride))
+ {
+ NvRmMemWrite(hMem, Offset, pSrc, ElementSize * Count);
+ }
+ else
+ {
+ while (Count--)
+ {
+ NvRmMemWrite(hMem, Offset, pSrc, ElementSize);
+ Offset += DstStride;
+ pSrc = (const NvU8 *)pSrc + SrcStride;
+ }
+ }
+}
+
+/* memmove() between two memory handles. Copies word-at-a-time when both
+ * physical addresses, both offsets, and the size are 4-byte aligned,
+ * byte-at-a-time otherwise. Overlap within the same handle is handled by
+ * copying backwards when the source precedes the destination. */
+void NvRmMemMove(
+ NvRmMemHandle dstHMem,
+ NvU32 dstOffset,
+ NvRmMemHandle srcHMem,
+ NvU32 srcOffset,
+ NvU32 Size)
+{
+ NvU32 i;
+
+ NV_ASSERT(dstOffset + Size <= dstHMem->size);
+ NV_ASSERT(srcOffset + Size <= srcHMem->size);
+ NVRM_HMEM_CHECK(dstHMem);
+ NVRM_HMEM_CHECK(srcHMem);
+
+ if (((dstHMem->PhysicalAddress |
+ srcHMem->PhysicalAddress |
+ dstOffset |
+ srcOffset |
+ Size) & 3) == 0)
+ {
+ // everything is nicely word aligned
+ if (dstHMem == srcHMem && srcOffset < dstOffset)
+ {
+ // Overlapping forward move: copy from the end to avoid
+ // clobbering not-yet-read source words.
+ for (i=Size; i; )
+ {
+ NvU32 data;
+ i -= 4;
+ data = NvRmMemRd32(srcHMem, srcOffset+i);
+ NvRmMemWr32(dstHMem, dstOffset+i, data);
+ }
+ }
+ else
+ {
+ for (i=0; i < Size; i+=4)
+ {
+ NvU32 data = NvRmMemRd32(srcHMem, srcOffset+i);
+ NvRmMemWr32(dstHMem, dstOffset+i, data);
+ }
+ }
+ }
+ else
+ {
+ // fall back to writing one byte at a time
+ if (dstHMem == srcHMem && srcOffset < dstOffset)
+ {
+ for (i=Size; i--;)
+ {
+ NvU8 data = NvRmMemRd08(srcHMem, srcOffset+i);
+ NvRmMemWr08(dstHMem, dstOffset+i, data);
+ }
+ }
+ else
+ {
+ for (i=0; i < Size; ++i)
+ {
+ NvU8 data = NvRmMemRd08(srcHMem, srcOffset+i);
+ NvRmMemWr08(dstHMem, dstOffset+i, data);
+ }
+ }
+ }
+}
+
+/* Perform data-cache maintenance on a CPU mapping of hMem.
+ * Writeback+Invalidate and Writeback-only are supported; invalidate-only
+ * is not available through NvOs and asserts. Silently returns on NULL
+ * handle/mapping, zero size, or no requested operation. */
+void NvRmMemCacheMaint(
+ NvRmMemHandle hMem,
+ void *pMapping,
+ NvU32 Size,
+ NvBool Writeback,
+ NvBool Invalidate)
+{
+ if (!hMem || !pMapping || !Size || !(Writeback || Invalidate))
+ return;
+
+ NVRM_HMEM_CHECK(hMem);
+ // The maintained range must lie within the handle's cached mapping.
+ // (Previously this compared against VirtualAddress+Size, which reduces
+ // to pMapping <= VirtualAddress and never bounded the range by the
+ // handle's actual extent.)
+ NV_ASSERT((NvU8*)pMapping+Size <= (NvU8*)hMem->VirtualAddress+hMem->size);
+ if (Writeback && Invalidate)
+ NvOsDataCacheWritebackInvalidateRange(pMapping, Size);
+ else if (Writeback)
+ NvOsDataCacheWritebackRange(pMapping, Size);
+ else {
+ NV_ASSERT(!"Invalidate-only cache maintenance not supported in NvOs");
+ }
+}
+
+/* Return the byte size the handle was created with. */
+NvU32 NvRmMemGetSize(NvRmMemHandle hMem)
+{
+ NV_ASSERT(hMem);
+ NVRM_HMEM_CHECK(hMem);
+ return hMem->size;
+}
+
+/* Return the alignment recorded at allocation time (0 if never
+ * allocated). */
+NvU32 NvRmMemGetAlignment(NvRmMemHandle hMem)
+{
+ NV_ASSERT(hMem);
+ NVRM_HMEM_CHECK(hMem);
+ return hMem->alignment;
+}
+
+/* Return the CPU cache line size in bytes (hard-coded; see note). */
+NvU32 NvRmMemGetCacheLineSize(void)
+{
+ // !!! FIXME: Currently for all our chips (ap15)
+ // both the L1 and L2 cache line sizes
+ // are 32 bytes. If this ever changes
+ // we need a way to figure it out on
+ // a chip by chip basis.
+ return 32;
+}
+
+/* Return the heap this handle was allocated from and store its base
+ * device address into *BaseAddr. */
+NvRmHeap NvRmMemGetHeapType(NvRmMemHandle hMem, NvU32 *BaseAddr)
+{
+ NV_ASSERT(hMem);
+ NVRM_HMEM_CHECK(hMem);
+
+ if (hMem->heap == NvRmHeap_External)
+ *BaseAddr = (NvU32)NvOsPageAddress(hMem->hPageHandle, 0);
+ else
+ *BaseAddr = hMem->PhysicalAddress;
+
+ return hMem->heap;
+}
+
+
+/* Thin wrapper over NvOsAlloc for host-side allocations. */
+void *NvRmHostAlloc(size_t size)
+{
+ return NvOsAlloc(size);
+}
+
+/* Thin wrapper over NvOsFree; pairs with NvRmHostAlloc. */
+void NvRmHostFree(void *ptr)
+{
+ NvOsFree(ptr);
+}
+
+
+/* Map [Offset, Offset+Size) of hMem at a caller-supplied, page-aligned
+ * virtual address (Size must be a whole number of pages). Page-backed
+ * heaps (External/GART) map via the page handle; carveout/IRAM map the
+ * page-rounded physical range uncached. Other heaps return
+ * NvError_NotImplemented. */
+NvError NvRmMemMapIntoCallerPtr(
+ NvRmMemHandle hMem,
+ void *pCallerPtr,
+ NvU32 Offset,
+ NvU32 Size)
+{
+ NvError err;
+ NVRM_HMEM_CHECK(hMem);
+
+ // The caller should be asking for an even number of pages. not strictly
+ // required, but the caller has already had to do the work to calculate the
+ // required number of pages so they might as well pass in a nice round
+ // number, which makes it easier to find bugs.
+ NV_ASSERT( (Size & (NVCPU_MIN_PAGE_SIZE-1)) == 0);
+
+ // Make sure the supplied virtual address is page aligned.
+ NV_ASSERT( (((NvUPtr)pCallerPtr) & (NVCPU_MIN_PAGE_SIZE-1)) == 0);
+
+ if (hMem->heap == NvRmHeap_External ||
+ hMem->heap == NvRmHeap_GART)
+ {
+ err = NvOsPageMapIntoPtr(hMem->hPageHandle,
+ pCallerPtr,
+ Offset,
+ Size);
+ }
+ else if (hMem->heap == NvRmHeap_ExternalCarveOut ||
+ hMem->heap == NvRmHeap_IRam)
+ {
+ // The caller is responsible for sending a size that
+ // is the correct number of pages, including this pageoffset
+ // at the beginning of the first page.
+ NvU32 PhysicalAddr = hMem->PhysicalAddress + Offset;
+ PhysicalAddr = PhysicalAddr & ~(NVCPU_MIN_PAGE_SIZE-1);
+
+ err = NvOsPhysicalMemMapIntoCaller(
+ pCallerPtr,
+ PhysicalAddr,
+ Size,
+ NvOsMemAttribute_Uncached,
+ NVOS_MEM_WRITE | NVOS_MEM_READ);
+ }
+ else
+ {
+ return NvError_NotImplemented;
+ }
+
+ return err;
+}
+
+
+/* Return an opaque id for the handle: the pointer value itself, with
+ * bit 0 set as a tag when NVRM_MEM_CHECK_ID is enabled (handle pointers
+ * always have bit 0 clear). Returns 0 for NULL/odd pointers. */
+NvU32 NvRmMemGetId(NvRmMemHandle hMem)
+{
+ NvU32 id = (NvU32)hMem;
+
+ // !!! FIXME: Need to really create a unique id to handle the case where
+ // hMem is freed, and then the next allocated hMem returns the same pointer
+ // value.
+
+ NVRM_HMEM_CHECK(hMem);
+ NV_ASSERT(((NvU32)hMem & 1) == 0);
+ if (!hMem || ((NvU32)hMem & 1))
+ return 0;
+
+#if NVRM_MEM_CHECK_ID
+ id |= 1;
+#endif
+
+ return id;
+}
+
+/* Recover a handle from an id produced by NvRmMemGetId and take a
+ * reference on it. With NVRM_MEM_CHECK_ID, ids must carry the bit-0 tag
+ * or NvError_BadParameter is returned. The id is not validated against
+ * live handles beyond the debug magic check (see FIXME). */
+NvError NvRmMemHandleFromId(NvU32 id, NvRmMemHandle *phMem)
+{
+ NvRmMemHandle hMem;
+ // !!! FIXME: (see comment in GetId). Specifically handle the case where
+ // the memory handle has already been freed.
+
+#if NVRM_MEM_CHECK_ID
+ *phMem = NULL;
+ NV_ASSERT(id & 1);
+ if (!(id & 1))
+ return NvError_BadParameter;
+#endif
+
+ hMem = (NvRmMemHandle)(id & ~1UL);
+
+ NVRM_HMEM_CHECK(hMem);
+
+ NvRmPrivMemIncrRef(hMem);
+
+ *phMem = hMem;
+ return NvSuccess;
+}
+
+/* Record the handle's physical address and size as a boot argument so it
+ * can be reclaimed after a warm boot (see
+ * NvRmMemHandleClaimPreservedHandle). Keys are handed out sequentially;
+ * fails with NvError_InsufficientMemory when all preserved-handle slots
+ * are used or the boot arg cannot be set. */
+NvError NvRmMemHandlePreserveHandle(
+ NvRmMemHandle hMem,
+ NvU32 *pKey)
+{
+ NvError e;
+ NvBootArgsPreservedMemHandle ArgMh;
+
+ NV_ASSERT(hMem && pKey);
+ NvOsMutexLock(hMem->hRmDevice->MemMgrMutex);
+ if (gs_NextPreservedMemHandleKey >=
+ (NvU32)NvBootArgKey_PreservedMemHandle_Num)
+ {
+ e = NvError_InsufficientMemory;
+ goto clean;
+ }
+
+ ArgMh.Address = (NvUPtr)hMem->PhysicalAddress;
+ ArgMh.Size = hMem->size;
+
+ e = NvOsBootArgSet(gs_NextPreservedMemHandleKey, &ArgMh, sizeof(ArgMh));
+
+ if (e==NvSuccess)
+ {
+ *pKey = gs_NextPreservedMemHandleKey;
+ gs_NextPreservedMemHandleKey++;
+ }
+ else
+ {
+ *pKey = 0;
+ e = NvError_InsufficientMemory;
+ }
+
+ clean:
+ NvOsMutexUnlock(hMem->hRmDevice->MemMgrMutex);
+ return e;
+}
+
+
+/* Hand ownership of the preserved handle stored under Key (reconstructed
+ * at init by NvRmPrivPreservedMemHandleInit) to the caller. Each key can
+ * be claimed at most once; returns NvError_InsufficientMemory if the
+ * slot is empty and NvError_BadParameter for out-of-range keys. */
+NvError NvRmMemHandleClaimPreservedHandle(
+ NvRmDeviceHandle hRm,
+ NvU32 Key,
+ NvRmMemHandle *pMem)
+{
+ NvU32 i;
+ NV_ASSERT(hRm && pMem && Key);
+ if (!pMem || !hRm ||
+ Key<NvBootArgKey_PreservedMemHandle_0 ||
+ Key>=NvBootArgKey_PreservedMemHandle_Num)
+ return NvError_BadParameter;
+
+ *pMem = NULL;
+
+ NvOsMutexLock(hRm->MemMgrMutex);
+ i = Key - NvBootArgKey_PreservedMemHandle_0;
+ *pMem = gs_PreservedHandles[i];
+ gs_PreservedHandles[i] = NULL;
+ NvOsMutexUnlock(hRm->MemMgrMutex);
+
+ if (*pMem)
+ return NvSuccess;
+
+ return NvError_InsufficientMemory;
+}
+
+
+/* Initialize the GART heap: set up the simple allocator over the GART
+ * aperture and select the AP15 or AP20 GART alloc/free/suspend/resume
+ * entry points based on the memory controller's reported capability
+ * version. Returns the heap descriptor, or NULL if allocator setup
+ * fails. */
+NvRmPrivHeap *NvRmPrivHeapGartInit(NvRmDeviceHandle hRmDevice)
+{
+ NvError err;
+ NvU32 length = hRmDevice->GartMemoryInfo.size;
+ NvRmPhysAddr base = hRmDevice->GartMemoryInfo.base;
+ NvRmModuleCapability caps[2];
+ NvRmModuleCapability *pCap = NULL;
+
+ // Capability pointers reference the entries themselves so pCap
+ // identifies which version matched.
+ caps[0].MajorVersion = 1; // AP15, AP16
+ caps[0].MinorVersion = 0;
+ caps[0].EcoLevel = 0;
+ caps[0].Capability = &caps[0];
+
+ caps[1].MajorVersion = 1; // AP20/T20
+ caps[1].MinorVersion = 1;
+ caps[1].EcoLevel = 0;
+ caps[1].Capability = &caps[1];
+
+ NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(
+ hRmDevice,
+ NvRmPrivModuleID_MemoryController,
+ caps,
+ NV_ARRAY_SIZE(caps),
+ (void**)&pCap));
+
+ err = NvRmPrivHeapSimple_HeapAlloc(
+ base,
+ length,
+ &gs_GartAllocator);
+
+ if (err != NvSuccess)
+ return NULL;
+
+ gs_GartHeap.heap = NvRmHeap_GART;
+ gs_GartHeap.length = length;
+ gs_GartHeap.PhysicalAddress = base;
+
+ gs_GartBaseAddr = (NvUPtr)base;
+ (void)gs_GartBaseAddr;
+
+ if ((pCap->MajorVersion == 1) && (pCap->MinorVersion == 0))
+ {
+ s_HeapGartAlloc = NvRmPrivAp15HeapGartAlloc;
+ s_HeapGartFree = NvRmPrivAp15HeapGartFree;
+ s_GartSuspend = NvRmPrivAp15GartSuspend;
+ s_GartResume = NvRmPrivAp15GartResume;
+ }
+ else
+ {
+ s_HeapGartAlloc = NvRmPrivAp20HeapGartAlloc;
+ s_HeapGartFree = NvRmPrivAp20HeapGartFree;
+ s_GartSuspend = NvRmPrivAp20GartSuspend;
+ s_GartResume = NvRmPrivAp20GartResume;
+ }
+
+ return &gs_GartHeap;
+}
+
+/* Tear down the GART heap state created by NvRmPrivHeapGartInit.
+ * NOTE(review): gs_GartSave is freed but not set to NULL here; a
+ * subsequent deinit without re-init would double-free -- verify callers
+ * never do that. */
+void NvRmPrivHeapGartDeinit(void)
+{
+ // deinit the gart allocator
+
+ NvRmPrivHeapSimple_HeapFree(&gs_GartAllocator);
+ NvOsMemset(&gs_GartHeap, 0, sizeof(gs_GartHeap));
+ NvOsMemset(&gs_GartAllocator, 0, sizeof(gs_GartAllocator));
+ NvOsFree( gs_GartSave );
+ gs_GartInited = NV_FALSE;
+}
+
+/* Dispatch to the chip-specific GART suspend routine selected at init. */
+void NvRmPrivGartSuspend(NvRmDeviceHandle hDevice)
+{
+ NV_ASSERT(s_GartSuspend);
+ (*s_GartSuspend)( hDevice );
+}
+
+/* Dispatch to the chip-specific GART resume routine selected at init. */
+void NvRmPrivGartResume(NvRmDeviceHandle hDevice)
+{
+ NV_ASSERT(s_GartResume);
+ (*s_GartResume)( hDevice );
+}
+
+/* Rebuild memory handles for regions the bootloader preserved across
+ * boot (passed via boot args). For each valid boot-arg entry a handle is
+ * created, its address pre-reserved in the carveout or IRAM heap, and
+ * stashed in gs_PreservedHandles until claimed. Stops at the first
+ * missing/empty entry. */
+void NvRmPrivPreservedMemHandleInit(NvRmDeviceHandle hRm)
+{
+ unsigned int i;
+ NvBootArgsPreservedMemHandle mem;
+
+ NvOsMemset(gs_PreservedHandles, 0, sizeof(gs_PreservedHandles));
+ gs_NextPreservedMemHandleKey = (NvU32)NvBootArgKey_PreservedMemHandle_0;
+
+ for (i=NvBootArgKey_PreservedMemHandle_0;
+ i<NvBootArgKey_PreservedMemHandle_Num; i++)
+ {
+ NvRmMemHandle hMem;
+ NvU32 j;
+
+ if (NvOsBootArgGet(i, &mem, sizeof(mem))!=NvSuccess)
+ break;
+
+ if (!mem.Address || !mem.Size)
+ break;
+
+ if (NvRmMemHandleCreate(hRm, &hMem, mem.Size)!=NvSuccess)
+ continue;
+
+ // Recover the alignment as the largest power of two dividing the
+ // preserved physical address.
+ hMem->PhysicalAddress = mem.Address;
+ j = mem.Address;
+ hMem->alignment = 1;
+ while ((j & 1) == 0)
+ {
+ hMem->alignment <<= 1;
+ j >>= 1;
+ }
+
+ if (NvRmPrivHeapCarveoutPreAlloc(mem.Address, mem.Size)==NvSuccess)
+ {
+ hMem->heap = NvRmHeap_ExternalCarveOut;
+ hMem->VirtualAddress = NvRmPrivHeapCarveoutMemMap(mem.Address,
+ mem.Size, NvOsMemAttribute_Uncached);
+ }
+ else if (NvRmPrivHeapIramPreAlloc(mem.Address, mem.Size)==NvSuccess)
+ {
+ hMem->heap = NvRmHeap_IRam;
+ hMem->VirtualAddress = NvRmPrivHeapIramMemMap(mem.Address,
+ mem.Size, NvOsMemAttribute_Uncached);
+ }
+
+ // Keep the handle only if it landed in a recognized heap.
+ if (hMem->heap)
+ gs_PreservedHandles[i-NvBootArgKey_PreservedMemHandle_0] = hMem;
+ else
+ NvRmMemHandleFree(hMem);
+ }
+}
+
+/* Report a single memory-manager statistic (carveout/GART totals, usage,
+ * largest free block) into *Result. Unknown stats return
+ * NvError_BadParameter. */
+NvError NvRmMemGetStat(NvRmMemStat Stat, NvS32* Result)
+{
+ /* Main point of this function is to be compatible backwards and forwards,
+ * i.e., breaking analysis apps is the thing to avoid.
+ * Minimum hassle - maximum impact.
+ * Performance is not that big of a deal.
+ * Could be extended to use NvS64 as return value. However, NvS64 is
+ * slightly more challenging in terms of printing etc. at the client side.
+ * This function should return counts as raw data as possible; conversions
+ * to percentages or anything like that should be left to the client.
+ */
+ if (Stat == NvRmMemStat_TotalCarveout)
+ {
+ *Result = NvRmPrivHeapCarveoutTotalSize();
+ }
+ else if (Stat == NvRmMemStat_UsedCarveout)
+ {
+ *Result = NvRmPrivHeapCarveoutMemoryUsed();
+ }
+ else if (Stat == NvRmMemStat_LargestFreeCarveoutBlock)
+ {
+ *Result = NvRmPrivHeapCarveoutLargestFreeBlock();
+ }
+ else if (Stat == NvRmMemStat_TotalGart)
+ {
+ *Result = gs_GartHeap.length;
+ }
+ else if (Stat == NvRmMemStat_UsedGart)
+ {
+ *Result = NvRmPrivHeapSimpleMemoryUsed(&gs_GartAllocator);
+ }
+ else if (Stat == NvRmMemStat_LargestFreeGartBlock)
+ {
+ *Result = NvRmPrivHeapSimpleLargestFreeBlock(&gs_GartAllocator);
+ }
+ else
+ {
+ return NvError_BadParameter;
+ }
+ return NvSuccess;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr_private.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr_private.h
new file mode 100644
index 000000000000..422b26c059d5
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_memmgr_private.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#ifndef INCLUDED_NVDDK_MEMMGR_PRIVATE_H
+#define INCLUDED_NVDDK_MEMMGR_PRIVATE_H
+
+#include "nvrm_heap.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Enable the handle magic-number check in debug builds only.
+#define NVRM_HMEM_CHECK_MAGIC NV_DEBUG
+
+// A handle has storage attached when any of its backing fields is set.
+#define NV_RM_HMEM_IS_ALLOCATED(hMem) \
+ (((hMem)->PhysicalAddress != NV_RM_INVALID_PHYS_ADDRESS) || \
+ ((hMem)->VirtualAddress != NULL) || \
+ ((hMem)->hPageHandle != NULL) )
+
+/* Internal representation of an NvRmMemHandle: bookkeeping for one
+ * allocation (backing store, cached CPU mapping, reference/pin counts). */
+typedef struct NvRmMemRec
+{
+ void *VirtualAddress; // cached whole-handle CPU mapping, or NULL
+ NvRmDeviceHandle hRmDevice;
+ NvOsPageAllocHandle hPageHandle; // set for External/GART page-backed heaps
+ NvRmPhysAddr PhysicalAddress; // NV_RM_INVALID_PHYS_ADDRESS if unallocated
+ NvU32 size; // size in bytes requested at creation
+ NvU32 alignment; // alignment recorded at allocation
+
+ /* Used for GART heap to keep track of the number of GART pages
+ * in use by this handle.
+ */
+ NvU32 Pages;
+
+ NvS32 refcount; // atomic; handle freed when it reaches 0
+ NvS32 pin_count; // atomic; bookkeeping only (see NvRmMemPin)
+
+ NvOsMemAttribute coherency;
+ NvRmHeap heap; // heap the storage came from; 0 if unallocated
+
+ NvBool mapped;
+ NvU8 priority;
+
+#if NVRM_HMEM_CHECK_MAGIC
+ NvU32 magic; // set to NVRM_MEM_MAGIC if valid
+#endif
+} NvRmMem;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif
+
+
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_module.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module.c
new file mode 100644
index 000000000000..c56a8232a6b7
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#if NV_IS_AVP
+#define NV_DEF_RMC_TRACE 0 // NO TRACING FOR AVP
+#endif
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_module.h"
+#include "nvrm_module_private.h"
+#include "nvrm_moduleids.h"
+#include "nvrm_chipid.h"
+#include "nvrm_drf.h"
+#include "nvrm_power.h"
+#include "nvrm_structure.h"
+#include "ap15/ap15rm_private.h"
+#include "ap20/ap20rm_misc_private.h"
+#include "ap15/arclk_rst.h"
+
+#define NVRM_ENABLE_PRINTF 0 // Module debug: 0=disable, 1=enable
+
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_MODULE_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_MODULE_PRINTF(x)
+#endif
+
+
+// FIXME: this is hacked
+// Handled thru RmTransportMessaging to CPU
+void ap15Rm_AvpModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId);
+
+void AP15ModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold);
+void AP20ModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold);
+
+/* Reset the given module, optionally leaving the reset line asserted
+ * (hold == NV_TRUE) for the caller to release later. */
+void
+NvRmModuleResetWithHold(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId, NvBool hold)
+{
+#if !NV_IS_AVP
+    /* AP15 and AP16 share one reset implementation; any other chip id
+     * takes the AP20 path. */
+    switch (hDevice->ChipId.Id)
+    {
+        case 0x15:
+        case 0x16:
+            AP15ModuleReset(hDevice, ModuleId, hold);
+            break;
+        default:
+            AP20ModuleReset(hDevice, ModuleId, hold);
+            break;
+    }
+#else
+    /* On the AVP the request is forwarded to the CPU-side implementation;
+     * the hold flag is not honored there. */
+    ap15Rm_AvpModuleReset(hDevice, ModuleId);
+#endif
+}
+
+/* Pulse the module reset: assert and immediately release (no hold). */
+void NvRmModuleReset(NvRmDeviceHandle hDevice, NvRmModuleID ModuleId)
+{
+ NvRmModuleResetWithHold(hDevice, ModuleId, NV_FALSE);
+}
+
+/**
+ * Match the hardware version of a module instance against the caller's
+ * capability table and hand back the caller-supplied data pointer of the
+ * matching row.
+ *
+ * @param hDevice    RM device handle
+ * @param ModuleId   Packed module id + instance (NVRM_MODULE_ID)
+ * @param pCaps      Caller's capability table
+ * @param NumCaps    Number of entries in pCaps
+ * @param Capability Receives pCaps[i].Capability of the matching row,
+ *                   or NULL when no row matches
+ *
+ * @return NvSuccess on a match; the NvRmPrivGetModuleInstance error when the
+ *         instance lookup fails; NvError_NotSupported when no row matches.
+ */
+NvError
+NvRmModuleGetCapabilities(
+    NvRmDeviceHandle hDevice,
+    NvRmModuleID ModuleId,
+    NvRmModuleCapability *pCaps,
+    NvU32 NumCaps,
+    void **Capability )
+{
+    NvError err;
+    NvRmModuleCapability *cap;
+    NvRmModuleInstance *inst;
+    NvBool found = NV_FALSE;
+    void *ret = 0;
+    NvU32 i;
+    NvRmChipId *id;
+
+    err = NvRmPrivGetModuleInstance( hDevice, ModuleId, &inst );
+    if( err != NvSuccess )
+    {
+        return err;
+    }
+
+    id = NvRmPrivGetChipId( hDevice );
+
+    for( i = 0; i < NumCaps; i++ )
+    {
+        cap = &pCaps[i];
+        ret = cap->Capability;
+
+        /* HW bug 574527 - version numbers for USB are wrong in the AP20
+         * relocation table, so match by instance number instead. */
+        if (NVRM_MODULE_ID_MODULE( ModuleId ) == NvRmModuleID_Usb2Otg)
+        {
+            if (id->Id == 0x20)
+            {
+                NvU32 instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+
+                if (((cap->MinorVersion == 5) && (instance == 0)) ||
+                    ((cap->MinorVersion == 6) && (instance == 1)) ||
+                    ((cap->MinorVersion == 7) && (instance == 2)))
+                {
+                    found = NV_TRUE;
+                    break;
+                }
+                continue;
+            }
+        }
+
+        if( ( cap->MajorVersion == inst->MajorVersion ) &&
+            ( cap->MinorVersion == inst->MinorVersion ) )
+        {
+            // FIXME: ignoring eco levels for now (properly)
+            // HACK: except display with AP16 A03/sim/emul (bug 515059)
+            if ( NVRM_MODULE_ID_MODULE( ModuleId ) == NvRmModuleID_Display )
+            {
+                if (id->Id == 0x16 &&
+                    (id->Minor == 0x3 || id->Major == 0))
+                {
+                    // Only accepts cap of (1,2,3) for this chipId
+                    if (cap->MajorVersion == 1 && cap->MinorVersion == 2 &&
+                        cap->EcoLevel == 0x3)
+                    {
+                        found = NV_TRUE;
+                        break;
+                    }
+                    // else not found and continue on to next cap
+                    continue;
+                }
+            }
+
+            found = NV_TRUE;
+            break;
+        }
+    }
+
+    if( !found )
+    {
+        /* BUGFIX: previously *Capability was cleared and then unconditionally
+         * overwritten with the last table row examined, and NvSuccess was
+         * returned - handing the caller a bogus capability pointer. Return
+         * NULL and an explicit error instead. */
+        NV_ASSERT(!"Could not find matching version of module in table");
+        *Capability = NULL;
+        return NvError_NotSupported;
+    }
+
+    *Capability = ret;
+    return NvSuccess;
+}
+
+/* Reverse lookup: map a physical base address back to the packed
+ * (module id, instance) that owns it. Returns NvError_ModuleNotPresent
+ * when no instance of any module matches the address. */
+NvError
+NvRmPrivFindModule( NvRmDeviceHandle hDevice, NvU32 Address,
+    NvRmPrivModuleID* pModuleId )
+{
+    NvRmModuleTable *ModTable;
+    NvU32 ModIdx;
+
+    NV_ASSERT((pModuleId != NULL) && (hDevice != NULL));
+
+    ModTable = NvRmPrivGetModuleTable( hDevice );
+
+    for( ModIdx = 0; ModIdx < NvRmPrivModuleID_Num; ModIdx++ )
+    {
+        NvRmModuleInstance *Inst;
+        NvU32 FirstDevId;
+        NvU16 InstNum;
+
+        if( ModTable->Modules[ModIdx].Index == NVRM_MODULE_INVALID )
+        {
+            continue;
+        }
+
+        /* The instances of one module id are stored contiguously and all
+         * share the same hardware device id; a change in device id marks
+         * the end of the run. */
+        Inst = ModTable->ModInst + ModTable->Modules[ModIdx].Index;
+        FirstDevId = Inst->DeviceId;
+
+        for( InstNum = 0; Inst->DeviceId == FirstDevId; Inst++, InstNum++ )
+        {
+            /* Does the device address match? */
+            if( Inst->PhysAddr == Address )
+            {
+                *pModuleId = (NvRmPrivModuleID)NVRM_MODULE_ID(ModIdx, InstNum);
+                return NvSuccess;
+            }
+        }
+    }
+
+    /* No instance of any module owns this address. */
+    return NvError_ModuleNotPresent;
+}
+
+/* Copy the chip's unique (fuse-programmed) identifier into pId.
+ * The id occupies sizeof(NvU64) bytes; the caller's buffer must be at
+ * least that large or NvError_BadParameter is returned. */
+NvError
+NvRmQueryChipUniqueId(NvRmDeviceHandle hDevHandle, NvU32 IdSize, void* pId)
+{
+    const NvU32 BufferSize = IdSize;   /* caller-supplied output capacity */
+    NvError err = NvError_NotSupported;
+
+    NV_ASSERT(hDevHandle);
+    NV_ASSERT(pId);
+
+    /* The unique id is 64 bits wide on every supported chip. */
+    IdSize = sizeof(NvU64);
+    if ((pId == NULL) || (BufferSize < IdSize))
+    {
+        return NvError_BadParameter;
+    }
+
+    switch (hDevHandle->ChipId.Id)
+    {
+        case 0x15:
+        case 0x16: /* ap16 reads the fuse info from the same place as ap15 */
+            NvOsMemset(pId, 0, BufferSize);
+            err = NvRmPrivAp15ChipUniqueId(hDevHandle, pId);
+            break;
+        case 0x20:
+            NvOsMemset(pId, 0, BufferSize);
+            err = NvRmPrivAp20ChipUniqueId(hDevHandle, pId);
+            break;
+        default:
+            NV_ASSERT(!"Unsupported chip ID");
+            break;
+    }
+    return err;
+}
+
+/* Fill pBytes with NumBytes of pseudo-random data, read 16 bits at a time
+ * from the clock-and-reset controller's PLL LFSR register.
+ * NOTE(review): an LFSR readback is presumably not cryptographic-quality
+ * entropy - confirm intended use before relying on this for security. */
+NvError NvRmGetRandomBytes(
+ NvRmDeviceHandle hRm,
+ NvU32 NumBytes,
+ void *pBytes)
+{
+ NvU8 *Array = (NvU8 *)pBytes;
+ NvU16 Val;
+
+ if (!hRm || !pBytes)
+ return NvError_BadParameter;
+
+ while (NumBytes)
+ {
+ /* Each register read yields 16 bits; emit low byte, then high byte
+ * (skipping the high byte when an odd count runs out). */
+ Val = (NvU16) NV_REGR(hRm, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_PLL_LFSR_0);
+ *Array++ = (Val & 0xff);
+ Val>>=8;
+ NumBytes--;
+ if (NumBytes)
+ {
+ *Array++ = (Val & 0xff);
+ NumBytes--;
+ }
+ }
+
+ return NvSuccess;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_common.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_common.c
new file mode 100644
index 000000000000..d5ae992f2a21
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_common.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#if NV_IS_AVP
+#define NV_DEF_RMC_TRACE 0 // NO TRACING FOR AVP
+#endif
+
+#include "nvcommon.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_module.h"
+#include "nvrm_module_private.h"
+#include "nvrm_moduleids.h"
+#include "ap15/ap15rm_private.h"
+
+#define NVRM_ENABLE_PRINTF 0 // Module debug: 0=disable, 1=enable
+
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_MODULE_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_MODULE_PRINTF(x)
+#endif
+
+/* Build the per-device module table by parsing the boot relocation table.
+ * On success mod_table->ModInst .. LastModInst describes every discovered
+ * module instance and the irq map is populated. */
+NvError
+NvRmPrivModuleInit( NvRmModuleTable *mod_table, NvU32 *reloc_table )
+{
+    NvU32 idx;
+    NvError err;
+
+    /* Mark every module slot invalid before the parse fills in real ones. */
+    for( idx = 0; idx < NvRmPrivModuleID_Num; idx++ )
+    {
+        mod_table->Modules[idx].Index = NVRM_MODULE_INVALID;
+    }
+
+    /* Start from an empty irq map as well. */
+    NvOsMemset( &mod_table->IrqMap, 0, sizeof(mod_table->IrqMap) );
+
+    err = NvRmPrivRelocationTableParse( reloc_table,
+              &mod_table->ModInst, &mod_table->LastModInst,
+              mod_table->Modules, &mod_table->IrqMap );
+    if( err != NvSuccess )
+    {
+        NV_ASSERT( !"NvRmPrivModuleInit failed" );
+        return err;
+    }
+
+    NV_ASSERT( mod_table->LastModInst );
+    NV_ASSERT( mod_table->ModInst );
+
+    /* LastModInst points one past the final parsed instance record. */
+    mod_table->NumModuleInstances =
+        mod_table->LastModInst - mod_table->ModInst;
+
+    return NvSuccess;
+}
+
+/* Counterpart to NvRmPrivModuleInit; intentionally a no-op.
+ * NOTE(review): the ModInst array produced by NvRmPrivRelocationTableParse
+ * is not released here - confirm the parser (or its caller) owns it. */
+void
+NvRmPrivModuleDeinit( NvRmModuleTable *mod_table )
+{
+}
+
+/* Look up the instance record for the (module, instance) pair packed in
+ * ModuleId.
+ *
+ * Error contract (callers depend on it):
+ *  - NvError_NotSupported with *out == NULL: module absent from the table.
+ *  - NvError_BadValue with *out == NULL: instance number out of range.
+ *  - NvError_NotSupported with *out != NULL: instance exists but is bonded
+ *    out (DevIdx == (NvU8)-1); *out is still valid so callers such as
+ *    NvRmModuleGetNumInstances can detect this case and keep scanning.
+ *  - NvSuccess: *out is the requested instance. */
+NvError
+NvRmPrivGetModuleInstance( NvRmDeviceHandle hDevice, NvRmModuleID ModuleId,
+ NvRmModuleInstance **out )
+{
+ NvRmModuleTable *tbl;
+ NvRmModule *module; // Pointer to module table
+ NvRmModuleInstance *inst; // Pointer to device instance
+ NvU32 DeviceId; // Hardware device id
+ NvU32 Module;
+ NvU32 Instance;
+
+ *out = NULL;
+
+ NV_ASSERT( hDevice );
+
+ tbl = NvRmPrivGetModuleTable( hDevice );
+
+ Module = NVRM_MODULE_ID_MODULE( ModuleId );
+ Instance = NVRM_MODULE_ID_INSTANCE( ModuleId );
+ NV_ASSERT( (NvU32)Module < (NvU32)NvRmPrivModuleID_Num );
+
+ // Get a pointer to the first instance of this module id type.
+ module = tbl->Modules;
+
+ // Check whether the index is valid or not.
+ if (module[Module].Index == NVRM_MODULE_INVALID)
+ {
+ return NvError_NotSupported;
+ }
+
+ inst = tbl->ModInst + module[Module].Index;
+
+ // Get its device id.
+ DeviceId = inst->DeviceId;
+
+ // Now point to the desired instance.
+ inst += Instance;
+
+ // Is this a valid instance and is it of the same hardware type?
+ // (All instances of one module id are contiguous and share a device id.)
+ if ((inst >= tbl->LastModInst) || (DeviceId != inst->DeviceId))
+ {
+ // Invalid instance.
+ return NvError_BadValue;
+ }
+
+ *out = inst;
+
+ // Check if instance is still valid and not bonded out.
+ // Still returning inst structure.
+ if ( (NvU8)-1 == inst->DevIdx )
+ return NvError_NotSupported;
+
+ return NvSuccess;
+}
+
+/* Report the physical base address and aperture length of a module
+ * instance. Either out-parameter may be NULL when not needed. */
+void
+NvRmModuleGetBaseAddress( NvRmDeviceHandle hDevice,
+    NvRmModuleID ModuleId, NvRmPhysAddr* pBaseAddress,
+    NvU32* pSize )
+{
+    NvRmModuleInstance *Instance;
+
+    NV_ASSERT_SUCCESS(
+        NvRmPrivGetModuleInstance(hDevice, ModuleId, &Instance)
+    );
+
+    if (pBaseAddress != NULL)
+    {
+        *pBaseAddress = Instance->PhysAddr;
+    }
+    if (pSize != NULL)
+    {
+        *pSize = Instance->Length;
+    }
+}
+
+/* Count the usable (non-bonded-out) instances of a module type.
+ * Relies on NvRmPrivGetModuleInstance leaving *inst set when it returns
+ * NvError_NotSupported for a bonded-out instance: those are skipped but
+ * the scan continues; any other failure (inst NULL or DevIdx valid)
+ * terminates the scan. */
+NvU32
+NvRmModuleGetNumInstances(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID Module)
+{
+ NvU32 Instances = 0;
+ NvU32 numInstances = 0;
+ for (;;)
+ {
+ NvRmModuleInstance *inst;
+ NvError e = NvRmPrivGetModuleInstance(
+ hDevice, NVRM_MODULE_ID(Module, Instances), &inst);
+ if (e != NvSuccess)
+ {
+ if ( !(inst && ((NvU8)-1 == inst->DevIdx)) )
+ break;
+ /* else if a module instance not avail (bonded out), continue
+ looking for next instance. */
+ }
+ else
+ numInstances++;
+ Instances++;
+ }
+ return numInstances;
+}
+
+/* Enumerate the instances of a module type.
+ *
+ * Two-call idiom: pass pModuleInfo == NULL to get the total instance count
+ * in *pNum, then call again with a buffer of that size. Bonded-out
+ * instances are skipped, using the same NvRmPrivGetModuleInstance contract
+ * as NvRmModuleGetNumInstances (inst stays valid on NvError_NotSupported
+ * for a bonded-out instance). On return *pNum holds the number of entries
+ * actually written (or the total count in the probe call). */
+NvError
+NvRmModuleGetModuleInfo(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID module,
+ NvU32 * pNum,
+ NvRmModuleInfo *pModuleInfo
+ )
+{
+ NvU32 instance = 0;
+ NvU32 numInstances = 0;
+
+ if ( NULL == pNum )
+ return NvError_BadParameter;
+
+ // if !pModuleInfo, returns total numInstances
+ while ( (NULL == pModuleInfo) || (numInstances < *pNum) )
+ {
+ NvRmModuleInstance *inst;
+ NvError e = NvRmPrivGetModuleInstance(
+ hDevice, NVRM_MODULE_ID(module, instance), &inst);
+ if (e != NvSuccess)
+ {
+ if ( !(inst && ((NvU8)-1 == inst->DevIdx)) )
+ break;
+ /* else if a module instance not avail (bonded out), continue
+ looking for next instance. */
+ }
+ else
+ {
+ if ( pModuleInfo )
+ {
+ pModuleInfo->Instance = instance;
+ pModuleInfo->BaseAddress = inst->PhysAddr;
+ pModuleInfo->Length = inst->Length;
+ pModuleInfo++;
+ }
+ numInstances++;
+ }
+ instance++;
+ }
+ *pNum = numInstances; // update with correct number of instances
+
+ return NvSuccess;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_private.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_private.h
new file mode 100644
index 000000000000..0648911e0b1c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_module_private.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_MODULE_PRIVATE_H
+#define NVRM_MODULE_PRIVATE_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+#include "nvrm_relocation_table.h"
+#include "nvrm_moduleids.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/* Per-device registry of modules parsed from the boot relocation table. */
+typedef struct NvRmModuleTableRec
+{
+ // One slot per module id; Index == NVRM_MODULE_INVALID when absent.
+ NvRmModule Modules[NvRmPrivModuleID_Num];
+ // First parsed instance record (filled in by the relocation table parser).
+ NvRmModuleInstance *ModInst;
+ // One past the last parsed instance record.
+ NvRmModuleInstance *LastModInst;
+ NvU32 NumModuleInstances;
+ NvRmIrqMap IrqMap;
+} NvRmModuleTable;
+
+/**
+ * Initialize the module info via the relocation table.
+ *
+ * @param mod_table The module table
+ * @param reloc_table The relocation table
+ */
+NvError
+NvRmPrivModuleInit(
+ NvRmModuleTable *mod_table,
+ NvU32 *reloc_table);
+
+/** Tear down a module table initialized by NvRmPrivModuleInit. */
+void
+NvRmPrivModuleDeinit(
+ NvRmModuleTable *mod_table );
+
+/** Return the module table owned by the given RM device. */
+NvRmModuleTable *
+NvRmPrivGetModuleTable(
+ NvRmDeviceHandle hDevice );
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // NVRM_MODULE_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_moduleids.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_moduleids.h
new file mode 100644
index 000000000000..3d63f093b932
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_moduleids.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_MODULEIDS_H
+#define NVRM_MODULEIDS_H
+
+#include "nvcommon.h"
+#include "nvrm_module.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/* FIXME - This is deprecated. Use NvRmModuleID instead. */
+typedef NvRmModuleID NvRmPrivModuleID;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // NVRM_MODULEIDS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux.c
new file mode 100644
index 000000000000..a14cc3a015bf
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_ENABLE_DEBUG_PRINTS 0
+#define SKIP_TRISTATE_REFCNT 0
+
+#include "nvcommon.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "ap15/ap15rm_private.h"
+#include "ap15/arapb_misc.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvodm_query_pinmux.h"
+
+/* Each of the pin mux configurations defined in the pin mux spreadsheet are
+ * stored in chip-specific tables. For each configuration, every pad group
+ * that must be programmed is stored as a single 32b entry, where the register
+ * offset (for both the tristate and pin mux control registers), field bit
+ * position (ditto), pin mux mask, and new pin mux state are programmed.
+ *
+ * The tables are microcode for a simple state machine. The state machine
+ * supports subroutine call/return (up to 4 levels of nesting), so that
+ * pin mux configurations which have substantial repetition can be
+ * represented compactly by separating common portion of the configurations
+ * into a subroutine. Additionally, the state machine supports
+ * "unprogramming" of the pin mux registers, so that pad groups which are
+ * incorrectly programmed to mux from a controller may be safely disowned,
+ * ensuring that no conflicts exist where multiple pad groups are muxing
+ * the same set of signals.
+ *
+ * Each module instance array has a reserved "reset" configuration at index
+ * zero. This special configuration is used in order to disown all pad
+ * groups whose reset state refers to the module instance. When a module
+ * instance configuration is to be applied, the reset configuration will
+ * first be applied, to ensure that no conflicts will arise between register
+ * reset values and the new configuration, followed by the application of
+ * the requested configuration.
+ *
+ * Furthermore, for controllers which support dynamic pinmuxing (i.e.,
+ * the "Multiplexed" pin map option), the last table entry is reserved for
+ * a "global unset," which will ensure that all configurations are disowned.
+ * This Multiplexed configuration should be applied before transitioning
+ * from one configuration to a second one.
+ *
+ * The table data has been packed into a single 32b entry to minimize code
+ * footprint using macros similar to the hardware register definitions, so
+ * that all of the shift and mask operations can be performed with the DRF
+ * macros.
+ */
+
+static void NvRmPrivApplyAllPinMuxes(
+ NvRmDeviceHandle hDevice,
+ NvBool First);
+
+static void NvRmPrivApplyAllModuleTypePinMuxes(
+ NvRmDeviceHandle hDevice,
+ NvU32 Module,
+ NvBool ApplyReset,
+ NvBool ApplyActual);
+
+/* FindConfigStart searches through an array of configuration data to find the
+ * starting position of a particular configuration in a module instance array.
+ * The stop position is programmable, so that sub-routines can be placed after
+ * the last valid true configuration */
+
+/* Advance through the pin mux "microcode" for one module instance until the
+ * start of configuration number Config, or return NULL if EndMarker is hit
+ * first. Set/Unset entries are fixed-size records; BranchLink/OpcodeExtend
+ * entries are single words, and each CONFIGEND() word terminates one
+ * configuration. */
+static const NvU32* NvRmPrivFindConfigStart(
+ const NvU32* Instance,
+ NvU32 Config,
+ NvU32 EndMarker)
+{
+ NvU32 Cnt = 0;
+ while ((Cnt < Config) && (*Instance!=EndMarker))
+ {
+ switch (NV_DRF_VAL(MUX, ENTRY, STATE, *Instance)) {
+ case PinMuxConfig_BranchLink:
+ case PinMuxConfig_OpcodeExtend:
+ if (*Instance==CONFIGEND())
+ Cnt++;
+ Instance++;
+ break;
+ default:
+ Instance += NVRM_PINMUX_SET_OPCODE_SIZE;
+ break;
+ }
+ }
+
+ /* Ugly postfix. In modules with bonafide subroutines, the last
+ * configuration CONFIGEND() will be followed by a MODULEDONE()
+ * token, with the first Set/Unset/Branch of the subroutine
+ * following that. To avoid leaving the "PC" pointing to a
+ * MODULEDONE() in the case where the first subroutine should be
+ * executed, fudge the "PC" up by one, to point to the subroutine. */
+ if (EndMarker==SUBROUTINESDONE() && *Instance==MODULEDONE())
+ Instance++;
+
+ if (*Instance==EndMarker)
+ Instance = NULL;
+
+ return Instance;
+}
+
+/* NvRmSetPadTristates will increment / decrement the reference count for
+ * each pad group's global tristate value for each "ConfigSet" command in
+ * a pad group configuration, and update the register as needed */
+/* Walk one module instance's pin mux program and, for every ConfigSet
+ * record, increment (EnableTristate) or decrement the pad group's global
+ * tristate reference count, writing the tristate register whenever the
+ * count crosses zero. Runs under the device mutex. */
+static void NvRmPrivSetPadTristates(
+    NvRmDeviceHandle hDevice,
+    const NvU32* Module,
+    NvU32 Config,
+    NvBool EnableTristate)
+{
+    int StackDepth = 0;
+    const NvU32 *Instance = NULL;
+    const NvU32 *ReturnStack[MAX_NESTING_DEPTH+1];
+
+    /* The re-multiplexing configuration is stored in program 0,
+     * along with the reset config. */
+    if (Config==NVODM_QUERY_PINMAP_MULTIPLEXED)
+        Config = 0;
+
+    Instance = NvRmPrivFindConfigStart(Module, Config, MODULEDONE());
+    /* The first stack return entry is NULL, so that when a ConfigEnd is
+     * encountered in the "main" configuration program, we pop off a NULL
+     * pointer, which causes the configuration loop to terminate. */
+    ReturnStack[0] = NULL;
+
+    /* This loop iterates over all of the pad groups that need to be updated,
+     * and updates the reference count for each appropriately. */
+
+    NvOsMutexLock(hDevice->mutex);
+
+    while (Instance)
+    {
+        switch (NV_DRF_VAL(MUX,ENTRY, STATE, *Instance)) {
+        case PinMuxConfig_OpcodeExtend:
+            /* Pop the most recent return address off of the return stack
+             * (which will be NULL if no values have been pushed onto the
+             * stack) */
+            if (NV_DRF_VAL(MUX,ENTRY, OPCODE_EXTENSION,
+                *Instance)==PinMuxOpcode_ConfigEnd)
+            {
+                Instance = ReturnStack[StackDepth--];
+            }
+            /* ModuleDone & SubroutinesDone should never be encountered
+             * during execution, for properly-formatted tables. */
+            else
+            {
+                NV_ASSERT(0 && "Logical entry in table!\n");
+            }
+            break;
+        case PinMuxConfig_BranchLink:
+            /* Push the next instruction onto the return stack if nesting
+             * space is available, and jump to the target. */
+            NV_ASSERT(StackDepth<MAX_NESTING_DEPTH);
+            ReturnStack[++StackDepth] = Instance+1;
+            Instance = NvRmPrivFindConfigStart(Module,
+                NV_DRF_VAL(MUX,ENTRY,BRANCH_ADDRESS,*Instance),
+                SUBROUTINESDONE());
+            NV_ASSERT(Instance && "Invalid branch configuration in table!\n");
+            break;
+        case PinMuxConfig_Set:
+        {
+            NvS16 SkipUpdate;
+            NvU32 TsOffs = NV_DRF_VAL(MUX,ENTRY, TS_OFFSET, *Instance);
+            NvU32 TsShift = NV_DRF_VAL(MUX,ENTRY, TS_SHIFT, *Instance);
+
+            /* abuse pre/post-increment, to ensure that SkipUpdate is 0 when
+             * the register needs to be programmed (i.e., enabling and
+             * previous value was 0, or disabling and new value is 0). */
+            if (EnableTristate)
+#if (SKIP_TRISTATE_REFCNT == 0)
+                SkipUpdate = --hDevice->TristateRefCount[TsOffs*32 + TsShift];
+            else
+                SkipUpdate = hDevice->TristateRefCount[TsOffs*32 + TsShift]++;
+#else
+                SkipUpdate = 1;
+            else
+                SkipUpdate = 0;
+#endif
+
+#if (SKIP_TRISTATE_REFCNT == 0)
+            if (SkipUpdate < 0)
+            {
+                hDevice->TristateRefCount[TsOffs*32 + TsShift] = 0;
+                /* BUGFIX: __LINE__ is an int, so it must be printed with %d;
+                 * the original "(%s:%s)" format passed an int to %s, which
+                 * is undefined behavior on this debug path. */
+                NV_DEBUG_PRINTF(("(%s:%d) Negative reference count detected "
+                    "on TRISTATE_REG_%c_0, bit %u\n",
+                    __FILE__, __LINE__, ('A'+(TsOffs)), TsShift));
+                //NV_ASSERT(SkipUpdate>=0);
+            }
+#endif
+
+            if (!SkipUpdate)
+            {
+                NvU32 Curr = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+                    APB_MISC_PP_TRISTATE_REG_A_0 + 4*TsOffs);
+                Curr &= ~(1<<TsShift);
+#if (SKIP_TRISTATE_REFCNT == 0)
+                Curr |= (EnableTristate?1:0)<<TsShift;
+#endif
+                NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+                    APB_MISC_PP_TRISTATE_REG_A_0 + 4*TsOffs, Curr);
+
+#if NVRM_PINMUX_DEBUG_FLAG
+                NV_DEBUG_PRINTF(("Setting TRISTATE_REG_%s to %s\n",
+                    (const char*)Instance[2],
+                    (EnableTristate)?"TRISTATE" : "NORMAL"));
+#endif
+            }
+        }
+        /* fall through.
+         * The "Unset" configurations are not applicable to tristate
+         * configuration, so skip over them. */
+        case PinMuxConfig_Unset:
+            Instance += NVRM_PINMUX_SET_OPCODE_SIZE;
+            break;
+        }
+    }
+    NvOsMutexUnlock(hDevice->mutex);
+}
+
+/* NvRmSetPinMuxCtl will apply new pin mux configurations to the pin mux
+ * control registers. */
+/* Execute one configuration of a module's pin mux program, performing a
+ * read-modify-write of the PIN_MUX_CTL registers for each Set/Unset record.
+ * A Set record claims the pad group for this module; an Unset record only
+ * rewrites the field when its current value still matches the "unset"
+ * pattern, so a pad group already claimed by another module is untouched.
+ * Runs under the device mutex. */
+static void NvRmPrivSetPinMuxCtl(
+ NvRmDeviceHandle hDevice,
+ const NvU32* Module,
+ NvU32 Config)
+{
+ NvU32 MuxCtlOffset, MuxCtlShift, MuxCtlMask, MuxCtlSet, MuxCtlUnset;
+ const NvU32 *ReturnStack[MAX_NESTING_DEPTH+1];
+ const NvU32 *Instance;
+ int StackDepth = 0;
+ NvU32 Curr;
+
+ ReturnStack[0] = NULL;
+ Instance = Module;
+
+ NvOsMutexLock(hDevice->mutex);
+
+ /* The re-multiplexing configuration is stored in program 0,
+ * along with the reset config. */
+ if (Config==NVODM_QUERY_PINMAP_MULTIPLEXED)
+ Config = 0;
+
+ Instance = NvRmPrivFindConfigStart(Module, Config, MODULEDONE());
+
+ // Apply the new configuration, setting / unsetting as appropriate
+ while (Instance)
+ {
+ switch (NV_DRF_VAL(MUX,ENTRY, STATE, *Instance)) {
+ case PinMuxConfig_OpcodeExtend:
+ // ConfigEnd pops the return stack (NULL terminates the loop).
+ if (NV_DRF_VAL(MUX,ENTRY, OPCODE_EXTENSION,
+ *Instance)==PinMuxOpcode_ConfigEnd)
+ {
+ Instance = ReturnStack[StackDepth--];
+ }
+ else
+ {
+ NV_ASSERT(0 && "Logical entry in table!\n");
+ }
+ break;
+ case PinMuxConfig_BranchLink:
+ // Call into a shared subroutine; resume at Instance+1 on return.
+ NV_ASSERT(StackDepth<MAX_NESTING_DEPTH);
+ ReturnStack[++StackDepth] = Instance+1;
+ Instance = NvRmPrivFindConfigStart(Module,
+ NV_DRF_VAL(MUX,ENTRY,BRANCH_ADDRESS,*Instance),
+ SUBROUTINESDONE());
+ NV_ASSERT(Instance && "Invalid branch configuration in table!\n");
+ break;
+ default:
+ {
+ // Set or Unset record: decode the register field to modify.
+ MuxCtlOffset = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_OFFSET, *Instance);
+ MuxCtlShift = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SHIFT, *Instance);
+ MuxCtlUnset = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_UNSET, *Instance);
+ MuxCtlSet = NV_DRF_VAL(MUX,ENTRY, MUX_CTL_SET, *Instance);
+ MuxCtlMask = NV_DRF_VAL(MUX, ENTRY, MUX_CTL_MASK, *Instance);
+
+ Curr = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_PIN_MUX_CTL_A_0 + 4*MuxCtlOffset);
+
+ if (NV_DRF_VAL(MUX,ENTRY,STATE,*Instance)==PinMuxConfig_Set)
+ {
+ Curr &= ~(MuxCtlMask<<MuxCtlShift);
+ Curr |= (MuxCtlSet<<MuxCtlShift);
+#if NVRM_PINMUX_DEBUG_FLAG
+ NV_DEBUG_PRINTF(("Configuring PINMUX_CTL_%s\n",
+ (const char *)Instance[1]));
+#endif
+
+ }
+ else if (((Curr>>MuxCtlShift)&MuxCtlMask)==MuxCtlUnset)
+ {
+ // Only disown the pad group if it still muxes to us.
+ NV_ASSERT(NV_DRF_VAL(MUX,ENTRY,STATE,
+ *Instance)==PinMuxConfig_Unset);
+ Curr &= ~(MuxCtlMask<<MuxCtlShift);
+ Curr |= (MuxCtlSet<<MuxCtlShift);
+#if NVRM_PINMUX_DEBUG_FLAG
+ NV_DEBUG_PRINTF(("Unconfiguring PINMUX_CTL_%s\n",
+ (const char *)Instance[1]));
+#endif
+ }
+
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_PIN_MUX_CTL_A_0 + 4*MuxCtlOffset, Curr);
+ Instance += NVRM_PINMUX_SET_OPCODE_SIZE;
+ break;
+ }
+ }
+ }
+ NvOsMutexUnlock(hDevice->mutex);
+}
+
+/* Apply pin mux programming for every IO module type on the device. */
+static void NvRmPrivApplyAllPinMuxes(
+    NvRmDeviceHandle hDevice,
+    NvBool First)
+{
+    NvOdmIoModule Module;
+
+    NV_ASSERT(hDevice->PinMuxTable);
+
+    for (Module = NvOdmIoModule_Ata; Module < NvOdmIoModule_Num; Module++)
+    {
+        /* During early boot, only the PMU I2C controller gets its real ODM
+         * configuration, so that primitive peripherals (EEPROMs, PMU, RTC)
+         * can be accessed during the boot process; everything else is left
+         * in the reset state until the second pass. */
+        NvBool ApplyActual =
+            First ? (Module == NvOdmIoModule_I2c_Pmu) : NV_TRUE;
+
+        NvRmPrivApplyAllModuleTypePinMuxes(hDevice, Module,
+            First, ApplyActual);
+    }
+}
+
+/* Apply pin mux programming for every instance of one module type.
+ * When ApplyReset is set, each instance's reset program (config 0) runs
+ * first; when ApplyActual is set, the ODM-selected configuration for the
+ * instance is applied afterwards. */
+static void NvRmPrivApplyAllModuleTypePinMuxes(
+ NvRmDeviceHandle hDevice,
+ NvU32 Module,
+ NvBool ApplyReset,
+ NvBool ApplyActual)
+{
+ const NvU32 *OdmConfigs;
+ NvU32 NumOdmConfigs;
+ const NvU32 **ModulePrograms = hDevice->PinMuxTable[(NvU32)Module];
+
+ // No program table for this module type on this chip.
+ if (!ModulePrograms)
+ return;
+
+ if (ApplyActual)
+ NvOdmQueryPinMux(Module, &OdmConfigs, &NumOdmConfigs);
+ else
+ {
+ OdmConfigs = NULL;
+ NumOdmConfigs = 0;
+ }
+
+ for (; *ModulePrograms ; ModulePrograms++)
+ {
+ /* Apply the reset configuration to ensure that the module is in
+ * a sane state, then apply the ODM configuration, if one is specified
+ */
+ if (ApplyReset)
+ NvRmPrivSetPinMuxCtl(hDevice, *ModulePrograms, 0);
+ if (NumOdmConfigs && ApplyActual)
+ {
+ NvRmPrivSetPinMuxCtl(hDevice, *ModulePrograms, *OdmConfigs);
+ NumOdmConfigs--;
+ OdmConfigs++;
+ }
+ }
+ /* If the ODM pin mux table is created correctly, there should be
+ * the same number of ODM configs as module instances; however, we
+ * allow the ODM to specify fewer configs than instances with assumed
+ * zeros for undefined modules */
+ while (NumOdmConfigs)
+ {
+ NV_ASSERT((*OdmConfigs==0) &&
+ "More ODM configs than module instances!\n");
+ NumOdmConfigs--;
+ OdmConfigs++;
+ }
+}
+
+/**
+ * RmInitPinMux will program the pin mux settings for all IO controllers to
+ * the ODM-selected value (or a safe reset value, if no value is defined in
+ * the ODM query).
+ *
+ * It will also read the current value of the tristate registers, to
+ * initialize the reference count
+ */
+void NvRmInitPinMux(
+    NvRmDeviceHandle hDevice,
+    NvBool First)
+{
+    NvU32 i, j, curr;
+
+    /* On the first call, resolve the chip-specific pin mux program table
+     * and seed the tristate reference counters from the current hardware
+     * state.  Subsequent calls skip straight to (re)applying the muxes. */
+    if (!hDevice->PinMuxTable)
+    {
+        switch (hDevice->ChipId.Id) {
+        case 0x15:
+            hDevice->PinMuxTable = NvRmAp15GetPinMuxConfigs(hDevice); break;
+        case 0x16:
+            hDevice->PinMuxTable = NvRmAp16GetPinMuxConfigs(hDevice); break;
+        case 0x20:
+            hDevice->PinMuxTable = NvRmAp20GetPinMuxConfigs(hDevice); break;
+        default:
+            NV_ASSERT(!"Unsupported chip ID");
+            hDevice->PinMuxTable = NULL;
+            return;
+        }
+
+        NvOsMutexLock(hDevice->mutex);
+        NvOsMemset(hDevice->TristateRefCount, 0,
+            sizeof(hDevice->TristateRefCount));
+
+        /* Read every tristate register (A..D, one 32b register per 32 pad
+         * groups) and record which pad groups are currently driven. */
+        for (i=0; i<=((APB_MISC_PP_TRISTATE_REG_D_0-
+            APB_MISC_PP_TRISTATE_REG_A_0)>>2); i++)
+        {
+            curr = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+                APB_MISC_PP_TRISTATE_REG_A_0 + 4*i);
+            // swap from 0=normal, 1=tristate to 0=tristate, 1=normal
+            curr = ~curr;
+            for (j=0; curr; j++, curr>>=1)
+            {
+                /* the oppositely-named tristate reference count keeps track
+                 * of the number of active users of each pad group, and
+                 * enables tristate when the count reaches zero. */
+                hDevice->TristateRefCount[i*32 + j] = (NvS16)(curr & 0x1);
+            }
+        }
+        NvOsMutexUnlock(hDevice->mutex);
+    }
+
+    /* NOTE(review): pin muxes are not applied on non-OAL WinCE builds —
+     * presumably handled elsewhere on that platform; confirm. */
+#if (!NVOS_IS_WINDOWS_CE || NV_OAL)
+    NvRmPrivApplyAllPinMuxes(hDevice, First);
+#endif
+
+}
+
+
+/* RmPinMuxConfigSelect sets a specific module to a specific configuration.
+ * It is used for multiplexed controllers, and should only be called by the
+ * ODM service function NvOdmPinMuxSet */
+void NvRmPinMuxConfigSelect(
+    NvRmDeviceHandle hDevice,
+    NvOdmIoModule IoModule,
+    NvU32 Instance,
+    NvU32 Configuration)
+{
+    const NvU32 ***ModulePrograms;
+    const NvU32 **InstancePrograms;
+    NvU32 Idx;
+
+    NV_ASSERT(hDevice);
+    if (!hDevice)
+        return;
+
+    ModulePrograms = hDevice->PinMuxTable;
+    NV_ASSERT(ModulePrograms && ((NvU32)IoModule < (NvU32)NvOdmIoModule_Num));
+
+    InstancePrograms = (const NvU32**)ModulePrograms[(NvU32)IoModule];
+    if (!InstancePrograms)
+        return;
+
+    /* Seek to the requested instance, stopping early at the NULL
+     * terminator of the per-instance program list. */
+    for (Idx = 0; Idx < Instance && *InstancePrograms; Idx++)
+        InstancePrograms++;
+
+    if (*InstancePrograms)
+        NvRmPrivSetPinMuxCtl(hDevice, *InstancePrograms, Configuration);
+}
+
+/* RmPinMuxConfigSetTristate will either enable or disable the tristate for a
+ * specific IO module configuration. It is called by the ODM service function
+ * OdmPinMuxConfigSetTristate, and by the RM function SetModuleTristate. RM
+ * client drivers should only call RmSetModuleTristate, which will program the
+ * tristate correctly based on the ODM query configuration. */
+void NvRmPinMuxConfigSetTristate(
+    NvRmDeviceHandle hDevice,
+    NvOdmIoModule IoModule,
+    NvU32 Instance,
+    NvU32 Configuration,
+    NvBool EnableTristate)
+{
+    const NvU32 ***ModulePrograms;
+    const NvU32 **InstancePrograms;
+    NvU32 Idx;
+
+    NV_ASSERT(hDevice);
+    if (!hDevice)
+        return;
+
+    ModulePrograms = hDevice->PinMuxTable;
+    NV_ASSERT(ModulePrograms && ((NvU32)IoModule < (NvU32)NvOdmIoModule_Num));
+
+    InstancePrograms = (const NvU32**)ModulePrograms[(NvU32)IoModule];
+    if (!InstancePrograms)
+        return;
+
+    /* Seek to the requested instance; the program list is NULL-terminated,
+     * so an out-of-range instance simply falls through with no effect. */
+    for (Idx = 0; Idx < Instance && *InstancePrograms; Idx++)
+        InstancePrograms++;
+
+    if (*InstancePrograms)
+        NvRmPrivSetPadTristates(hDevice, *InstancePrograms,
+            Configuration, EnableTristate);
+}
+
+/* Enables or disables the pad tristate for one ODM module instance, using
+ * the configuration the ODM query declares for that instance.  Returns
+ * NvError_NotSupported when no configuration is defined. */
+NvError NvRmSetOdmModuleTristate(
+    NvRmDeviceHandle hDevice,
+    NvU32 OdmModule,
+    NvU32 OdmInstance,
+    NvBool EnableTristate)
+{
+    const NvU32 *Configs;
+    NvU32 ConfigCount;
+
+    NV_ASSERT(hDevice);
+    if (!hDevice)
+        return NvError_BadParameter;
+
+    /* Look up the ODM-specified configurations for this module class. */
+    NvOdmQueryPinMux(OdmModule, &Configs, &ConfigCount);
+
+    /* No configuration defined for this instance: nothing to program. */
+    if ((OdmInstance >= ConfigCount) || (Configs[OdmInstance] == 0))
+        return NvError_NotSupported;
+
+    NvRmPinMuxConfigSetTristate(hDevice, OdmModule, OdmInstance,
+        Configs[OdmInstance], EnableTristate);
+
+    return NvSuccess;
+}
+
+/* Maps an RM module ID to its corresponding ODM module(s) and instance(s).
+ *
+ * The chip-specific mapper (Ap15/Ap16/Ap20) is consulted first; if it does
+ * not provide a mapping, a default 1:1 table for the standard IO
+ * controllers is used.  Returns the number of entries written to
+ * pOdmModules/pOdmInstances (0 when the RM module has no ODM analog). */
+NvU32 NvRmPrivRmModuleToOdmModule(
+    NvU32 ChipId,
+    NvU32 RmModule,
+    NvOdmIoModule *pOdmModules,
+    NvU32 *pOdmInstances)
+{
+    NvU32 Cnt = 0;
+    NvBool Result = NV_FALSE;
+
+    NV_ASSERT(pOdmModules && pOdmInstances);
+
+    if (ChipId==0x15)
+    {
+        Result = NvRmPrivAp15RmModuleToOdmModule(RmModule,
+            pOdmModules, pOdmInstances, &Cnt);
+    }
+    else if (ChipId==0x16)
+    {
+        Result = NvRmPrivAp16RmModuleToOdmModule(RmModule,
+            pOdmModules, pOdmInstances, &Cnt);
+    }
+    else if (ChipId==0x20)
+    {
+        Result = NvRmPrivAp20RmModuleToOdmModule(RmModule,
+            pOdmModules, pOdmInstances, &Cnt);
+    }
+
+    /* A default mapping is provided for all standard I/O controllers,
+     * if the chip-specific implementation does not implement a mapping */
+    if (!Result)
+    {
+        NvRmModuleID Module = NVRM_MODULE_ID_MODULE(RmModule);
+        NvU32 Instance = NVRM_MODULE_ID_INSTANCE(RmModule);
+
+        Cnt = 1;
+        switch (Module) {
+        case NvRmModuleID_Display:
+            *pOdmModules = NvOdmIoModule_Display;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Ide:
+            *pOdmModules = NvOdmIoModule_Ata;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Vi:
+            *pOdmModules = NvOdmIoModule_VideoInput;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Usb2Otg:
+            *pOdmModules = NvOdmIoModule_Usb;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Pwm:
+            *pOdmModules = NvOdmIoModule_Pwm;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Twc:
+            *pOdmModules = NvOdmIoModule_Twc;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Hsmmc:
+            *pOdmModules = NvOdmIoModule_Hsmmc;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Sdio:
+            *pOdmModules = NvOdmIoModule_Sdio;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Nand:
+            *pOdmModules = NvOdmIoModule_Nand;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_I2c:
+            *pOdmModules = NvOdmIoModule_I2c;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Spdif:
+            *pOdmModules = NvOdmIoModule_Spdif;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Uart:
+            *pOdmModules = NvOdmIoModule_Uart;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Csi:
+            *pOdmModules = NvOdmIoModule_Csi;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Hdmi:
+            *pOdmModules = NvOdmIoModule_Hdmi;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Mipi:
+            *pOdmModules = NvOdmIoModule_Hsi;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Tvo:
+            /* TVO maps to two ODM modules (TV out and CRT DAC). */
+            pOdmModules[0] = NvOdmIoModule_Tvo;
+            pOdmModules[1] = NvOdmIoModule_Crt;
+            pOdmInstances[0] = 0;
+            pOdmInstances[1] = 0;
+            Cnt = 2;
+            break;
+        case NvRmModuleID_Dsi:
+            *pOdmModules = NvOdmIoModule_Dsi;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Dvc:
+            /* The DVC controller is the dedicated PMU I2C bus. */
+            *pOdmModules = NvOdmIoModule_I2c_Pmu;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmPrivModuleID_Mio_Exio:
+            *pOdmModules = NvOdmIoModule_Mio;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Xio:
+            *pOdmModules = NvOdmIoModule_Xio;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Spi:
+            *pOdmModules = NvOdmIoModule_Sflash;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Slink:
+            *pOdmModules = NvOdmIoModule_Spi;
+            *pOdmInstances = Instance;
+            break;
+        case NvRmModuleID_Kbc:
+            *pOdmModules = NvOdmIoModule_Kbd;
+            *pOdmInstances = Instance;
+            break;
+        default:
+            // all the RM modules which have no ODM analogs (like 3d)
+            Cnt = 0;
+            break;
+        }
+    }
+
+    return Cnt;
+}
+
+/* RmSetModuleTristate will enable / disable the pad tristates for the
+ * selected pin mux configuration of an IO module. */
+NvError NvRmSetModuleTristate(
+    NvRmDeviceHandle hDevice,
+    NvRmModuleID RmModule,
+    NvBool EnableTristate)
+{
+    const NvU32 *OdmConfigs;
+    NvU32 NumOdmConfigs;
+    NvU32 OdmModules[4];
+    NvU32 OdmInstances[4];
+    NvU32 NumOdmModules = 0;
+    NvU32 i;
+
+    NV_ASSERT(hDevice);
+    if (!hDevice)
+        return NvError_BadParameter;
+
+    NumOdmModules =
+        NvRmPrivRmModuleToOdmModule(hDevice->ChipId.Id,
+            RmModule, (NvOdmIoModule*)OdmModules, OdmInstances);
+    if (!NumOdmModules)
+        return NvError_NotSupported;
+
+    /* return NotSupported if the ODM has not defined a pin mux configuration
+     * for this module. */
+    for (i=0; i<NumOdmModules; i++)
+    {
+        NvOdmQueryPinMux(OdmModules[i], &OdmConfigs, &NumOdmConfigs);
+        /* Bounds-check the instance index BEFORE indexing OdmConfigs --
+         * the previous ordering dereferenced OdmConfigs[OdmInstances[i]]
+         * first, reading past the end of the ODM config array whenever
+         * the instance was out of range. */
+        if (OdmInstances[i] >= NumOdmConfigs)
+        {
+            NV_DEBUG_PRINTF(("Attempted to set TRISTATE for Module %u, Instance"
+                " %u (ODM module %u instance %u) with undefined config\n",
+                NVRM_MODULE_ID_MODULE(RmModule),
+                NVRM_MODULE_ID_INSTANCE(RmModule),
+                OdmModules[i], OdmInstances[i]));
+            return NvError_NotSupported;
+        }
+        if ((!NumOdmConfigs) || (!OdmConfigs[OdmInstances[i]]))
+            return NvError_NotSupported;
+    }
+
+    /* All instances validated above; program each one. */
+    for (i=0; i<NumOdmModules; i++)
+    {
+        NvOdmQueryPinMux(OdmModules[i], &OdmConfigs, &NumOdmConfigs);
+        NV_ASSERT(OdmInstances[i] < NumOdmConfigs);
+        NvRmPinMuxConfigSetTristate(hDevice, OdmModules[i],
+            OdmInstances[i], OdmConfigs[OdmInstances[i]], EnableTristate);
+    }
+    return NvSuccess;
+}
+
+/* Enables or disables the pad-group tristate backing a GPIO port/pin,
+ * maintaining the per-pad-group reference count so the pad is only
+ * tristated when its last user releases it. */
+void NvRmSetGpioTristate(
+    NvRmDeviceHandle hDevice,
+    NvU32 Port,
+    NvU32 Pin,
+    NvBool EnableTristate)
+{
+    NvU32 Mapping = 0;
+    NvS16 SkipUpdate;
+    NvBool ret = NV_FALSE;
+
+    NV_ASSERT(hDevice);
+
+    /* Map the GPIO port/pin to its pad group's tristate register and bit. */
+    switch (hDevice->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        ret = NvRmAp15GetPinGroupForGpio(hDevice, Port, Pin, &Mapping);
+        break;
+    case 0x20:
+        ret = NvRmAp20GetPinGroupForGpio(hDevice, Port, Pin, &Mapping);
+        break;
+    default:
+        NV_ASSERT(!"Chip ID not supported");
+        return;
+    }
+
+    if (ret)
+    {
+        NvU32 TsOffs = NV_DRF_VAL(MUX, GPIOMAP, TS_OFFSET, Mapping);
+        NvU32 TsShift = NV_DRF_VAL(MUX, GPIOMAP, TS_SHIFT, Mapping);
+
+        NvOsMutexLock(hDevice->mutex);
+
+        /* With refcounting enabled, only the 0 <-> 1 transitions of the
+         * reference count reach the hardware (SkipUpdate != 0 otherwise). */
+        if (EnableTristate)
+#if (SKIP_TRISTATE_REFCNT == 0)
+            SkipUpdate = --hDevice->TristateRefCount[TsOffs*32 + TsShift];
+        else
+            SkipUpdate = hDevice->TristateRefCount[TsOffs*32 + TsShift]++;
+#else
+            SkipUpdate = 1;
+        else
+            SkipUpdate = 0;
+#endif
+
+#if (SKIP_TRISTATE_REFCNT == 0)
+        if (SkipUpdate < 0)
+        {
+            hDevice->TristateRefCount[TsOffs*32 + TsShift] = 0;
+            /* __LINE__ expands to an integer constant, so it must be
+             * printed with %d; the original %s conversion was undefined
+             * behavior (treating an int as a char pointer). */
+            NV_DEBUG_PRINTF(("(%s:%d) Negative reference count detected on "
+                "TRISTATE_REG_%c_0, bit %u\n", __FILE__, __LINE__,
+                ('A'+(TsOffs)), TsShift));
+            //NV_ASSERT(SkipUpdate>=0);
+        }
+#endif
+
+        if (!SkipUpdate)
+        {
+            NvU32 Curr = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+                APB_MISC_PP_TRISTATE_REG_A_0 + 4*TsOffs);
+            Curr &= ~(1<<TsShift);
+#if (SKIP_TRISTATE_REFCNT == 0)
+            Curr |= (EnableTristate?1:0)<<TsShift;
+#endif
+            NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+                APB_MISC_PP_TRISTATE_REG_A_0 + 4*TsOffs, Curr);
+        }
+
+        NvOsMutexUnlock(hDevice->mutex);
+    }
+}
+
+/* Configures an external clock (CDEV) pin mux instance and returns the
+ * frequency reported by the chip-specific helper, or 0 on failure. */
+NvU32 NvRmExternalClockConfig(
+    NvRmDeviceHandle hDevice,
+    NvU32 IoModule,
+    NvU32 Instance,
+    NvU32 Config,
+    NvBool EnableTristate)
+{
+    const NvU32 ***ModulePrograms = NULL;
+    const NvU32 **InstancePrograms = NULL;
+    const NvU32 *CdevInstance;
+    NvU32 i = 0;
+    NvU32 ret = 0;
+
+    /* Chip-specific helpers for gating and querying the clock source. */
+    void (*pfnEnableExtClock)(NvRmDeviceHandle, const NvU32 *, NvU32, NvBool);
+    NvU32 (*pfnGetExtClockFreq)(NvRmDeviceHandle, const NvU32 *, NvU32);
+
+    NV_ASSERT(hDevice);
+
+    /* This function returns an NvU32 frequency, not an NvError; the
+     * previous "return NvError_BadParameter" here leaked an error enum
+     * into the frequency domain.  Return 0, matching the unsupported-chip
+     * failure path below. */
+    if (!hDevice)
+        return 0;
+
+    switch (hDevice->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        pfnEnableExtClock = NvRmPrivAp15EnableExternalClockSource;
+        pfnGetExtClockFreq = NvRmPrivAp15GetExternalClockSourceFreq;
+        break;
+    case 0x20:
+        pfnEnableExtClock = NvRmPrivAp20EnableExternalClockSource;
+        pfnGetExtClockFreq = NvRmPrivAp20GetExternalClockSourceFreq;
+        break;
+    default:
+        NV_ASSERT(!"Unsupported Chip ID");
+        return 0;
+    }
+
+    ModulePrograms = hDevice->PinMuxTable;
+
+    NV_ASSERT(IoModule==NvOdmIoModule_ExternalClock);
+
+    NV_ASSERT(ModulePrograms && ((NvU32)IoModule < (NvU32)NvOdmIoModule_Num));
+
+    InstancePrograms = (const NvU32**)ModulePrograms[(NvU32)IoModule];
+
+    if (InstancePrograms)
+    {
+        /* Seek to the requested instance in the NULL-terminated list. */
+        while (i<Instance && *InstancePrograms)
+        {
+            i++;
+            InstancePrograms++;
+        }
+
+        if (*InstancePrograms)
+        {
+            if (!EnableTristate)
+                NvRmPrivSetPinMuxCtl(hDevice, *InstancePrograms, Config);
+
+            NvRmPrivSetPadTristates(hDevice, *InstancePrograms,
+                Config, EnableTristate);
+            /* Locate the selected config's program and drive the clock. */
+            CdevInstance = NvRmPrivFindConfigStart(*InstancePrograms,
+                Config, MODULEDONE());
+            pfnEnableExtClock(hDevice, CdevInstance, Config, !EnableTristate);
+            ret = pfnGetExtClockFreq(hDevice, CdevInstance, Config);
+        }
+    }
+    return ret;
+}
+
+/* Queries the interface capabilities of an RM module through the
+ * chip-specific implementation, validating the caller-supplied capability
+ * structure size against the module's expected structure. */
+NvError NvRmGetModuleInterfaceCapabilities(
+    NvRmDeviceHandle hRm,
+    NvRmModuleID ModuleId,
+    NvU32 CapStructSize,
+    void *pCaps)
+{
+    NvU32 NumOdmConfigs;
+    const NvU32 *OdmConfigs;
+    NvOdmIoModule OdmModules[4];
+    NvU32 OdmInstances[4];
+    NvU32 NumOdmModules;
+    NvU32 ExpectedSize;
+    NvError (*pfnInterfaceCaps)(NvOdmIoModule,NvU32,NvU32,void*);
+
+    NV_ASSERT(hRm);
+    NV_ASSERT(pCaps);
+    if (!hRm || !pCaps)
+        return NvError_BadParameter;
+
+    /* Select the chip-specific capability query implementation. */
+    switch (hRm->ChipId.Id) {
+    case 0x15:
+        pfnInterfaceCaps = NvRmPrivAp15GetModuleInterfaceCaps;
+        break;
+    case 0x16:
+        pfnInterfaceCaps = NvRmPrivAp16GetModuleInterfaceCaps;
+        break;
+    case 0x20:
+        pfnInterfaceCaps = NvRmPrivAp20GetModuleInterfaceCaps;
+        break;
+    default:
+        NV_ASSERT(!"Unsupported chip ID!");
+        return NvError_NotSupported;
+    }
+
+    NumOdmModules =
+        NvRmPrivRmModuleToOdmModule(hRm->ChipId.Id, ModuleId,
+            (NvOdmIoModule *)OdmModules, OdmInstances);
+    NV_ASSERT(NumOdmModules<=1);
+    if (!NumOdmModules)
+        return NvError_NotSupported;
+
+    /* Determine the capability structure this module class expects. */
+    switch (OdmModules[0]) {
+    case NvOdmIoModule_Hsmmc:
+    case NvOdmIoModule_Sdio:
+        ExpectedSize = sizeof(NvRmModuleSdmmcInterfaceCaps);
+        break;
+    case NvOdmIoModule_Pwm:
+        ExpectedSize = sizeof(NvRmModulePwmInterfaceCaps);
+        break;
+    case NvOdmIoModule_Nand:
+        ExpectedSize = sizeof(NvRmModuleNandInterfaceCaps);
+        break;
+    case NvOdmIoModule_Uart:
+        ExpectedSize = sizeof(NvRmModuleUartInterfaceCaps);
+        break;
+    default:
+        return NvError_NotSupported;
+    }
+
+    if (CapStructSize != ExpectedSize)
+    {
+        NV_ASSERT(!"Invalid cap struct size");
+        return NvError_BadParameter;
+    }
+
+    /* Only modules with a defined ODM pin mux config are supported. */
+    NvOdmQueryPinMux(OdmModules[0], &OdmConfigs, &NumOdmConfigs);
+    if (OdmInstances[0]>=NumOdmConfigs || !OdmConfigs[OdmInstances[0]])
+        return NvError_NotSupported;
+
+    return pfnInterfaceCaps(OdmModules[0],OdmInstances[0],
+        OdmConfigs[OdmInstances[0]],pCaps);
+}
+
+/* Reads the SoC strap value for the given strap group via the
+ * chip-specific helper.  Returns NvError_NotSupported for chips with no
+ * strap reader. */
+NvError NvRmGetStraps(
+    NvRmDeviceHandle hDevice,
+    NvRmStrapGroup StrapGroup,
+    NvU32* pStrapValue)
+{
+    NV_ASSERT(hDevice && pStrapValue);
+
+    if (!hDevice || !pStrapValue)
+        return NvError_BadParameter;
+
+    switch (hDevice->ChipId.Id) {
+    case 0x15:
+    case 0x16:
+        return NvRmAp15GetStraps(hDevice, StrapGroup, pStrapValue);
+    case 0x20:
+        return NvRmAp20GetStraps(hDevice, StrapGroup, pStrapValue);
+    default:
+        NV_ASSERT(!"Unsupported Chip ID");
+        /* The previous "return 0" is not a meaningful NvError for a
+         * failed lookup and left *pStrapValue unwritten while the other
+         * paths report status; report NotSupported instead. */
+        return NvError_NotSupported;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux_utils.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux_utils.h
new file mode 100644
index 000000000000..f91170d3b07c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pinmux_utils.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef NVRM_PINMUX_UTILS_H
+#define NVRM_PINMUX_UTILS_H
+
+/*
+ * nvrm_pinmux_utils.h defines the pinmux macros to implement for the resource
+ * manager.
+ */
+
+#include "nvcommon.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_modules.h"
+#include "ap15/ap15rm_private.h"
+#include "ap16/arapb_misc.h"
+
+
+/* The pin mux code supports run-time trace debugging of all updates to the
+ * pin mux & tristate registers by embedding strings (cast to NvU32s) into the
+ * control tables.
+ */
+#define NVRM_PINMUX_DEBUG_FLAG 0
+#define NVRM_PINMUX_SET_OPCODE_SIZE_RANGE 3:1
+
+
+#if NVRM_PINMUX_DEBUG_FLAG
+NV_CT_ASSERT(sizeof(NvU32)==sizeof(const char*));
+#endif
+
+// The extra strings bloat the size of Set/Unset opcodes
+#define NVRM_PINMUX_SET_OPCODE_SIZE ((NVRM_PINMUX_DEBUG_FLAG)?NVRM_PINMUX_SET_OPCODE_SIZE_RANGE)
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+typedef enum {
+ PinMuxConfig_OpcodeExtend = 0,
+ PinMuxConfig_Set = 1,
+ PinMuxConfig_Unset = 2,
+ PinMuxConfig_BranchLink = 3,
+} PinMuxConfigStates;
+
+typedef enum {
+ PinMuxOpcode_ConfigEnd = 0,
+ PinMuxOpcode_ModuleDone = 1,
+ PinMuxOpcode_SubroutinesDone = 2,
+} PinMuxConfigExtendOpcodes;
+
+// When the state is BranchLink, this is the number of words to increment the current "PC"
+#define MUX_ENTRY_0_BRANCH_ADDRESS_RANGE 31:2
+// The incr1 offset from TRISTATE_REG_A_0 to the pad group's tristate register
+#define MUX_ENTRY_0_TS_OFFSET_RANGE 31:26
+// The bit position within the tristate register for the pad group
+#define MUX_ENTRY_0_TS_SHIFT_RANGE 25:21
+// The incr1 offset from PIN_MUX_CTL_A_0 to the pad group's pin mux control register
+#define MUX_ENTRY_0_MUX_CTL_OFFSET_RANGE 20:17
+// The bit position within the pin mux control register for the pad group
+#define MUX_ENTRY_0_MUX_CTL_SHIFT_RANGE 16:12
+// The mask for the pad group -- expanded to 3b for forward-compatibility
+#define MUX_ENTRY_0_MUX_CTL_MASK_RANGE 10:8
+// When a pad group needs to be owned (or disowned), this value is applied
+#define MUX_ENTRY_0_MUX_CTL_SET_RANGE 7:5
+// This value is compared against, to determine if the pad group should be disowned
+#define MUX_ENTRY_0_MUX_CTL_UNSET_RANGE 4:2
+// for extended opcodes, this field is set with the extended opcode
+#define MUX_ENTRY_0_OPCODE_EXTENSION_RANGE 3:2
+// The state for this entry
+#define MUX_ENTRY_0_STATE_RANGE 1:0
+
+
+#define MAX_NESTING_DEPTH 4
+
+/* This macro is used to generate 32b value to program the tristate& pad mux control
+ * registers for config/unconfig for a padgroup
+ */
+#define PIN_MUX_ENTRY(TSOFF,TSSHIFT,MUXOFF,MUXSHIFT,MUXMASK,MUXSET,MUXUNSET,STAT) \
+ (NV_DRF_NUM(MUX, ENTRY, TS_OFFSET, TSOFF) | NV_DRF_NUM(MUX, ENTRY, TS_SHIFT, TSSHIFT) | \
+ NV_DRF_NUM(MUX, ENTRY, MUX_CTL_OFFSET, MUXOFF) | NV_DRF_NUM(MUX, ENTRY, MUX_CTL_SHIFT, MUXSHIFT) | \
+ NV_DRF_NUM(MUX, ENTRY,MUX_CTL_MASK, MUXMASK) | NV_DRF_NUM(MUX, ENTRY,MUX_CTL_SET, MUXSET) | \
+ NV_DRF_NUM(MUX, ENTRY, MUX_CTL_UNSET,MUXUNSET) | NV_DRF_NUM(MUX, ENTRY, STATE,STAT))
+
+// This is used to program the tristate & pad mux control registers for a pad group
+#define CONFIG_VAL(TRISTATE_REG, MUXCTL_REG, PADGROUP, MUX) \
+ (PIN_MUX_ENTRY(((APB_MISC_PP_TRISTATE_REG_##TRISTATE_REG##_0 - APB_MISC_PP_TRISTATE_REG_A_0)>>2), \
+ APB_MISC_PP_TRISTATE_REG_##TRISTATE_REG##_0_Z_##PADGROUP##_SHIFT, \
+ ((APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0 - APB_MISC_PP_PIN_MUX_CTL_A_0) >> 2), \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_SHIFT, \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_DEFAULT_MASK, \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_##MUX, \
+ 0, PinMuxConfig_Set))
+
+/* This macro is used to compare a pad group against a potentially conflicting
+ * enum (where the conflict is caused by setting a new config), and to resolve the
+ * conflict by setting the conflicting pad group to a different, non-conflicting option.
+ * Read this as: if (PADGROUP) is equal to (CONFLICTMUX), replace it with (RESOLUTIONMUX)
+ */
+#define UNCONFIG_VAL(MUXCTL_REG, PADGROUP, CONFLICTMUX, RESOLUTIONMUX) \
+ (PIN_MUX_ENTRY(0, 0, \
+ ((APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0 - APB_MISC_PP_PIN_MUX_CTL_A_0) >> 2), \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_SHIFT, \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_DEFAULT_MASK, \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_##RESOLUTIONMUX, \
+ APB_MISC_PP_PIN_MUX_CTL_##MUXCTL_REG##_0_##PADGROUP##_SEL_##CONFLICTMUX, \
+ PinMuxConfig_Unset))
+
+#if NVRM_PINMUX_DEBUG_FLAG
+#define CONFIG(TRISTATE_REG, MUXCTL_REG, PADGROUP, MUX) \
+ (CONFIG_VAL(TRISTATE_REG, MUXCTL_REG, PADGROUP, MUX)), \
+ (NvU32)(const void*)(#MUXCTL_REG "_0_" #PADGROUP "_SEL to " #MUX), \
+ (NvU32)(const void*)(#TRISTATE_REG "_0_Z_" #PADGROUP)
+
+#define UNCONFIG(MUXCTL_REG, PADGROUP, CONFLICTMUX, RESOLUTIONMUX) \
+ (UNCONFIG_VAL(MUXCTL_REG, PADGROUP, CONFLICTMUX, RESOLUTIONMUX)), \
+ (NvU32)(const void*)(#MUXCTL_REG "_0_" #PADGROUP "_SEL from " #CONFLICTMUX " to " #RESOLUTIONMUX), \
+ (NvU32)(const void*)(NULL)
+#else
+#define CONFIG(TRISTATE_REG, MUXCTL_REG, PADGROUP, MUX) \
+ (CONFIG_VAL(TRISTATE_REG, MUXCTL_REG, PADGROUP, MUX))
+#define UNCONFIG(MUXCTL_REG, PADGROUP, CONFLICTMUX, RESOLUTIONMUX) \
+ (UNCONFIG_VAL(MUXCTL_REG, PADGROUP, CONFLICTMUX, RESOLUTIONMUX))
+#endif
+
+/* This macro is used for opcode entries in the tables */
+#define PIN_MUX_OPCODE(_OP_) \
+ (NV_DRF_NUM(MUX,ENTRY,STATE,PinMuxConfig_OpcodeExtend) | \
+ NV_DRF_NUM(MUX,ENTRY,OPCODE_EXTENSION,(_OP_)))
+
+/* This is a dummy entry in the array which indicates that all setting/unsetting for
+ * a configuration is complete. */
+#define CONFIGEND() PIN_MUX_OPCODE(PinMuxOpcode_ConfigEnd)
+
+/* This is a dummy entry in the array which indicates that the last configuration
+ * for the module instance has been passed. */
+#define MODULEDONE() PIN_MUX_OPCODE(PinMuxOpcode_ModuleDone)
+
+/* This is a dummy entry in the array which indicates that all "extra" configurations
+ * used by sub-routines have been passed. */
+#define SUBROUTINESDONE() PIN_MUX_OPCODE(PinMuxOpcode_SubroutinesDone)
+
+/* This macro is used to insert a branch-and-link from one configuration to another */
+#define BRANCH(_ADDR_) \
+ (NV_DRF_NUM(MUX,ENTRY,STATE,PinMuxConfig_BranchLink) | \
+ NV_DRF_NUM(MUX,ENTRY,BRANCH_ADDRESS,(_ADDR_)))
+
+
+// The below entries define the table format for GPIO Port/Pin-to-Tristate register mappings
+// Each table entry is 16b, and one is stored for every GPIO Port/Pin on the chip
+#define MUX_GPIOMAP_0_TS_OFFSET_RANGE 15:10
+// Defines where in the 32b register the tristate control is located
+#define MUX_GPIOMAP_0_TS_SHIFT_RANGE 4:0
+
+#define TRISTATE_ENTRY(TSOFFS, TSSHIFT) \
+ ((NvU16)(NV_DRF_NUM(MUX,GPIOMAP,TS_OFFSET,(TSOFFS)) | \
+ NV_DRF_NUM(MUX,GPIOMAP,TS_SHIFT,(TSSHIFT))))
+
+#define GPIO_TRISTATE(TRIREG,PADGROUP) \
+ (TRISTATE_ENTRY(((APB_MISC_PP_TRISTATE_REG_##TRIREG##_0 - APB_MISC_PP_TRISTATE_REG_A_0)>>2), \
+ APB_MISC_PP_TRISTATE_REG_##TRIREG##_0_Z_##PADGROUP##_SHIFT))
+
+
+/** RmInitPinMux will program the pin mux settings for all IO controllers to
+ * the ODM-selected value (or a safe reset value, if no value is defined in
+ * the ODM query).
+ * It will also read the current value of the tristate registers, to
+ * initialize the reference count
+ *
+ * @param hDevice The RM instance
+ * @param First Indicates whether to perform just safe-reset and DVC
+ * initialization, for early boot, or full initialization
+ */
+void NvRmInitPinMux(
+ NvRmDeviceHandle hDevice,
+ NvBool First);
+
+/** RmPinMuxConfigSelect sets a specific module to a specific configuration. It is used
+ * for multiplexed controllers, and should only be called by modules which support
+ * multiplexing. Note that this interface uses the IoModule enumerant, not the RmModule.
+ *
+ *@param hDevice The RM instance
+ *@param IoModule The module to set
+ *@param Instance The instance number of the Module
+ *@param Configuration The module's configuration to set
+ */
+
+void NvRmPinMuxConfigSelect(
+ NvRmDeviceHandle hDevice,
+ NvOdmIoModule IoModule,
+ NvU32 Instance,
+ NvU32 Configuration);
+
+/** RmPinMuxConfigSetTristate will either enable or disable the tristate for a specific
+ * IO module configuration. It is used for multiplexed controllers, and should only be
+ * called by modules which support multiplexing. Note that this interface uses the
+ * IoModule enumerant, not the RmModule.
+ *
+ *@param hDevice The RM instance
+ *@param RMModule The module to set
+ *@param Instance The instance number of the module.
+ *@param Configuration The module's configuration to set
+ *@param EnableTristate NV_TRUE will tristate the specified pins, NV_FALSE will un-tristate
+ */
+
+void NvRmPinMuxConfigSetTristate(
+ NvRmDeviceHandle hDevice,
+ NvOdmIoModule IoModule,
+ NvU32 Instance,
+ NvU32 Configuration,
+ NvBool EnableTristate);
+
+/** NvRmSetGpioTristate will either enable or disable the tristate for GPIO ports.
+ * RM client gpio should only call NvRmSetGpioTristate,
+ * which will program the tristate correctly based on the pins of the particular port.
+ *
+ *@param hDevice The RM instance
+ *@param Port The GPIO port to set
+ *@param Pin The Pinnumber of the port to set.
+ *@param EnableTristate NV_TRUE will tristate the specified pins, NV_FALSE will un-tristate
+ */
+void NvRmSetGpioTristate(
+ NvRmDeviceHandle hDevice,
+ NvU32 Port,
+ NvU32 Pin,
+ NvBool EnableTristate);
+
+/** NvRmPrivRmModuleToOdmModule will perform the mapping of RM modules to
+ * ODM modules and instances, using the chip-specific mapping wherever
+ * necessary */
+NvU32 NvRmPrivRmModuleToOdmModule(
+ NvU32 ChipId,
+ NvU32 RmModule,
+ NvOdmIoModule *pOdmModules,
+ NvU32 *pOdmInstances);
+
+
+// Forward declarations for all chip-specific helper functions
+NvError NvRmPrivAp15GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 Config,
+ void* pCaps);
+
+NvError NvRmPrivAp16GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 Config,
+ void* pCaps);
+
+NvError NvRmPrivAp20GetModuleInterfaceCaps(
+ NvOdmIoModule Module,
+ NvU32 Instance,
+ NvU32 Config,
+ void* pCaps);
+
+const NvU32*** NvRmAp15GetPinMuxConfigs(NvRmDeviceHandle hDevice);
+
+const NvU32*** NvRmAp16GetPinMuxConfigs(NvRmDeviceHandle hDevice);
+
+const NvU32*** NvRmAp20GetPinMuxConfigs(NvRmDeviceHandle hDevice);
+
+NvBool NvRmAp15GetPinGroupForGpio(
+ NvRmDeviceHandle hDevice,
+ NvU32 Port,
+ NvU32 Pin,
+ NvU32 *pMapping);
+
+NvBool NvRmAp20GetPinGroupForGpio(
+ NvRmDeviceHandle hDevice,
+ NvU32 Port,
+ NvU32 Pin,
+ NvU32* pMapping);
+
+void NvRmPrivAp15EnableExternalClockSource(
+ NvRmDeviceHandle hDevice,
+ const NvU32* pModuleProgram,
+ NvU32 Config,
+ NvBool EnableClock);
+
+void NvRmPrivAp20EnableExternalClockSource(
+ NvRmDeviceHandle hDevice,
+ const NvU32* pModuleProgram,
+ NvU32 Config,
+ NvBool EnableClock);
+
+NvU32 NvRmPrivAp15GetExternalClockSourceFreq(
+ NvRmDeviceHandle hDevice,
+ const NvU32* pModuleProgram,
+ NvU32 Config);
+
+NvU32 NvRmPrivAp20GetExternalClockSourceFreq(
+ NvRmDeviceHandle hDevice,
+ const NvU32* pModuleProgram,
+ NvU32 Config);
+
+NvBool NvRmPrivAp15RmModuleToOdmModule(
+ NvRmModuleID ModuleID,
+ NvOdmIoModule* pOdmModules,
+ NvU32* pOdmInstances,
+ NvU32 *pCnt);
+
+NvBool NvRmPrivAp16RmModuleToOdmModule(
+ NvRmModuleID ModuleID,
+ NvOdmIoModule* pOdmModules,
+ NvU32* pOdmInstances,
+ NvU32 *pCnt);
+
+NvBool NvRmPrivAp20RmModuleToOdmModule(
+ NvRmModuleID ModuldID,
+ NvOdmIoModule* pOdmModules,
+ NvU32* pOdmInstances,
+ NvU32 *pCnt);
+
+/**
+ * Chip-specific functions to get SoC strap value for the given strap group.
+ *
+ * @param hDevice The RM instance
+ * @param StrapGroup Strap group to be read.
+ * @pStrapValue A pointer to the returned strap group value.
+ *
+ * @retval NvSuccess if strap value is read successfully
+ * @retval NvError_NotSupported if the specified strap group does not
+ * exist on the current SoC.
+ */
+NvError
+NvRmAp15GetStraps(
+ NvRmDeviceHandle hDevice,
+ NvRmStrapGroup StrapGroup,
+ NvU32* pStrapValue);
+
+NvError
+NvRmAp20GetStraps(
+ NvRmDeviceHandle hDevice,
+ NvRmStrapGroup StrapGroup,
+ NvU32* pStrapValue);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // NVRM_PINMUX_UTILS_H
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu.c
new file mode 100644
index 000000000000..405a7eeff267
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_pmu.h"
+#include "nvodm_pmu.h"
+#include "nvassert.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_moduleids.h"
+#include "nvrm_pmu_private.h"
+#include "nvrm_power_private.h"
+#include "nvrm_clocks.h"
+#include "nvrm_structure.h"
+#include "nvodm_query_discovery.h"
+#include "nvodm_query.h"
+
+
+// TODO: after testing is completed, remove this macro
+// "1" - keep PMU rails always On, "0" - allows to power Off PMU rails
+#define PMU_RAILS_NEVER_OFF (0)
+
+// Retry count for voltage control API
+#define VOLTAGE_CONTROL_RETRY_CNT (2)
+
+/*
+ * Combines RM PMU object data. A single static instance (s_Pmu) holds all
+ * state of the RM PMU interface; access is serialized via hMutex.
+ */
+typedef struct NvRmPmuRec
+{
+    // RM PMU access mutex (guards ODM PMU calls and the fields below)
+    NvOsMutexHandle hMutex;
+
+    // ODM PMU device handle
+    NvOdmPmuDeviceHandle hOdmPmu;
+
+    // PMU interrupt handle
+    NvOsInterruptHandle hInterrupt;
+
+    // PMU ISR semaphore (signaled by PmuIsr, waited on by PmuThread)
+    NvOsSemaphoreHandle hSemaphore;
+
+    // PMU interrupt thread
+    NvOsThreadHandle hThread;
+
+    // PMU interrupt mask state (written under hMutex, read by PmuThread)
+    volatile NvBool IntrMasked;
+
+    // PMU interrupt thread abort indicator (set by PmuThreadTerminate)
+    volatile NvBool AbortThread;
+
+    // IO power rails level detect mask
+    NvU32 IoPwrDetectMask;
+
+    // Unpowered IO rails mask
+    NvU32 NoIoPwrMask;
+} NvRmPmu;
+
+// RM PMU object
+static NvRmPmu s_Pmu;
+
+// PMU supported execution environment flag
+static NvBool s_PmuSupportedEnv = NV_FALSE;
+
+/*****************************************************************************/
+
+/*
+ * PMU interrupt service routine: defers all handling to PmuThread by
+ * signaling its semaphore (no PMU work is done in ISR context).
+ */
+static void PmuIsr(void* args)
+{
+    NvRmPmu* pPmu = (NvRmPmu*)args;
+    NvOsSemaphoreSignal(pPmu->hSemaphore);
+}
+
+/*
+ * PMU interrupt handling thread: waits for the ISR semaphore and dispatches
+ * to the ODM PMU interrupt handler under the PMU mutex. Exits when
+ * AbortThread is set (see PmuThreadTerminate).
+ */
+static void PmuThread(void* args)
+{
+    NvRmPmu* pPmu = (NvRmPmu*)args;
+
+    for (;;)
+    {
+        NvOsSemaphoreWait(pPmu->hSemaphore);
+        if (pPmu->AbortThread)
+        {
+            break;
+        }
+
+        NvOsMutexLock(pPmu->hMutex);
+        if (pPmu->IntrMasked)
+        {
+            // Masked: skip the handler and do NOT call NvRmInterruptDone here;
+            // the interrupt is re-enabled on unmask (NvRmPrivPmuInterruptMask)
+            NvOsMutexUnlock(pPmu->hMutex);
+            continue;
+        }
+        NvOdmPmuInterruptHandler(pPmu->hOdmPmu);
+        NvOsMutexUnlock(pPmu->hMutex);
+
+        // Re-enable the interrupt after handling completes
+        if (pPmu->hInterrupt)
+            NvRmInterruptDone(pPmu->hInterrupt);
+    }
+}
+
+/* Stops PmuThread: sets the abort flag, wakes the thread, and joins it. */
+static void PmuThreadTerminate(NvRmPmu* pPmu)
+{
+    /*
+     * Request thread abort, signal semaphore to make sure the thread is
+     * awaken and wait for its self-termination. Do nothing if invalid PMU
+     * structure
+     */
+    if (pPmu)
+    {
+        if (pPmu->hSemaphore && pPmu->hThread)
+        {
+            pPmu->AbortThread = NV_TRUE;
+            NvOsSemaphoreSignal(pPmu->hSemaphore);
+            NvOsThreadJoin(pPmu->hThread);
+        }
+        // Clear the flag so the structure could be reused after termination
+        pPmu->AbortThread = NV_FALSE;
+    }
+}
+
+/*****************************************************************************/
+
+/*
+ * Initializes the RM PMU interface: on real silicon (ExecPlatform_Soc)
+ * creates the PMU mutex/semaphore/thread, registers the external PMU
+ * interrupt (if the ODM reports one connected), opens the ODM PMU device,
+ * and initializes IO power and core voltage control. On any failure the
+ * partially constructed state is torn down via NvRmPrivPmuDeinit.
+ */
+NvError NvRmPrivPmuInit(NvRmDeviceHandle hRmDevice)
+{
+    NvError e;
+    ExecPlatform env;
+    NvOdmPmuProperty PmuProperty;
+
+    NV_ASSERT(hRmDevice);
+    env = NvRmPrivGetExecPlatform(hRmDevice);
+
+    NvOsMemset(&s_Pmu, 0, sizeof(NvRmPmu));
+    s_PmuSupportedEnv = NV_FALSE;
+
+    if (env == ExecPlatform_Soc)
+    {
+        // Set supported environment flag
+        s_PmuSupportedEnv = NV_TRUE;
+
+        // Create the PMU mutex, semaphore, interrupt handler thread,
+        // register PMU interrupt, and get ODM PMU handle
+        NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_Pmu.hMutex));
+        NV_CHECK_ERROR_CLEANUP(NvOsSemaphoreCreate(&s_Pmu.hSemaphore, 0));
+
+        if (NvOdmQueryGetPmuProperty(&PmuProperty) && PmuProperty.IrqConnected)
+        {
+            // AP20+ supports configurable IRQ polarity; earlier chips
+            // require active-low
+            if (hRmDevice->ChipId.Id >= 0x20)
+                NvRmPrivAp20SetPmuIrqPolarity(
+                    hRmDevice, PmuProperty.IrqPolarity);
+            else
+                NV_ASSERT(PmuProperty.IrqPolarity ==
+                          NvOdmInterruptPolarity_Low);
+            // NOTE: this is a bare scope block (NOT the else body) - it runs
+            // unconditionally for all chips to register the PMU interrupt
+            {
+                NvOsInterruptHandler hPmuIsr = PmuIsr;
+                NvU32 PmuExtIrq = NvRmGetIrqForLogicalInterrupt(
+                    hRmDevice, NVRM_MODULE_ID(NvRmPrivModuleID_PmuExt, 0), 0);
+                NV_CHECK_ERROR_CLEANUP(NvRmInterruptRegister(hRmDevice, 1,
+                    &PmuExtIrq, &hPmuIsr, &s_Pmu, &s_Pmu.hInterrupt, NV_FALSE));
+            }
+        }
+
+        if(!NvOdmPmuDeviceOpen(&s_Pmu.hOdmPmu))
+        {
+            e = NvError_NotInitialized;
+            goto fail;
+        }
+        NV_CHECK_ERROR_CLEANUP(NvOsThreadCreate(PmuThread, &s_Pmu, &s_Pmu.hThread));
+        NvRmPrivIoPowerControlInit(hRmDevice);
+        NvRmPrivCoreVoltageInit(hRmDevice);
+    }
+    return NvSuccess;
+
+fail:
+    // Deinit is safe on partially initialized state (handles are zeroed above)
+    NvRmPrivPmuDeinit(hRmDevice);
+    return e;
+}
+
+/* Enables the PMU external interrupt, if one was registered at init. */
+void NvRmPrivPmuInterruptEnable(NvRmDeviceHandle hRmDevice)
+{
+    if (s_Pmu.hInterrupt)
+        NvRmInterruptEnable(hRmDevice, s_Pmu.hInterrupt);
+}
+
+/*
+ * Masks (NV_TRUE) or unmasks (NV_FALSE) PMU interrupt handling. The flag is
+ * updated under the PMU mutex so it synchronizes with PmuThread; on unmask,
+ * NvRmInterruptDone re-enables the interrupt that PmuThread skipped while
+ * masked.
+ */
+void NvRmPrivPmuInterruptMask(NvRmDeviceHandle hRmDevice, NvBool mask)
+{
+    if (s_Pmu.hInterrupt)
+    {
+
+        NvOsMutexLock(s_Pmu.hMutex);
+        s_Pmu.IntrMasked = mask;
+        NvOsMutexUnlock(s_Pmu.hMutex);
+
+        if (!mask)
+            NvRmInterruptDone(s_Pmu.hInterrupt);
+    }
+}
+
+/*
+ * Deinitializes the RM PMU interface: stops the interrupt thread, closes
+ * the ODM PMU device, and releases interrupt/semaphore/mutex resources.
+ * Safe to call on partially initialized state (used on the init fail path).
+ */
+void NvRmPrivPmuDeinit(NvRmDeviceHandle hRmDevice)
+{
+    if (s_PmuSupportedEnv == NV_FALSE)
+        return;
+
+    PmuThreadTerminate(&s_Pmu);
+    NvOdmPmuDeviceClose(s_Pmu.hOdmPmu);
+    NvRmInterruptUnregister(hRmDevice, s_Pmu.hInterrupt);
+    NvOsSemaphoreDestroy(s_Pmu.hSemaphore);
+    NvOsMutexDestroy(s_Pmu.hMutex);
+
+    NvOsMemset(&s_Pmu, 0, sizeof(NvRmPmu));
+    s_PmuSupportedEnv = NV_FALSE;
+}
+/*****************************************************************************/
+
+/*
+ * Fills *pCapabilities with the ODM-reported capabilities of the given
+ * voltage rail.
+ * NOTE(review): when the PMU is not supported in this environment the
+ * function returns without touching *pCapabilities - callers must
+ * pre-initialize the structure (as NvRmPrivPmuRailControl does with {0}).
+ */
+void NvRmPmuGetCapabilities(
+    NvRmDeviceHandle hDevice,
+    NvU32 vddId,
+    NvRmPmuVddRailCapabilities * pCapabilities )
+{
+    NvOdmPmuVddRailCapabilities RailCap;
+
+    if (!s_PmuSupportedEnv)
+        return;
+
+    NvOdmPmuGetCapabilities(vddId, &RailCap);
+    // Translate ODM capability fields to the RM structure
+    pCapabilities->MaxMilliVolts = RailCap.MaxMilliVolts;
+    pCapabilities->MinMilliVolts = RailCap.MinMilliVolts;
+    pCapabilities->requestMilliVolts = RailCap.requestMilliVolts;
+    pCapabilities->RmProtected = RailCap.OdmProtected;
+    pCapabilities->StepMilliVolts = RailCap.StepMilliVolts;
+}
+
+/*
+ * Reads the current voltage of the given rail via the ODM PMU, retrying up
+ * to VOLTAGE_CONTROL_RETRY_CNT times. Failure of all retries only asserts
+ * in debug builds; *pMilliVolts is left unchanged in that case.
+ */
+void NvRmPmuGetVoltage(
+    NvRmDeviceHandle hDevice,
+    NvU32 vddId,
+    NvU32 * pMilliVolts)
+{
+    NvU32 i;
+    if (!s_PmuSupportedEnv)
+        return;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
+    {
+        if (NvOdmPmuGetVoltage(s_Pmu.hOdmPmu, vddId, pMilliVolts))
+            break;
+    }
+    NV_ASSERT(i < VOLTAGE_CONTROL_RETRY_CNT);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+}
+
+/*
+ * Sets a new voltage level (or ODM_VOLTAGE_OFF) for the given rail via the
+ * ODM PMU, then - if IO rails were pending power-on - waits for the supply
+ * to settle, latches IO level detection, and re-enables the just-powered IO
+ * rails. Blocked while diagnostic mode is active for any module.
+ * If pSettleMicroSeconds is non-NULL it returns the remaining settling time
+ * the caller still has to wait (0 if this function already waited).
+ */
+void NvRmPmuSetVoltage(
+    NvRmDeviceHandle hDevice,
+    NvU32 vddId,
+    NvU32 MilliVolts,
+    NvU32 * pSettleMicroSeconds)
+{
+    NvU32 i;
+    NvU32 t = NVRM_PWR_DET_DELAY_US;
+    NV_ASSERT(hDevice);
+
+    if (pSettleMicroSeconds)
+        *pSettleMicroSeconds = 0;
+
+    if (!s_PmuSupportedEnv)
+        return;
+
+    // This API is blocked if diagnostic is in progress for any module
+    if (NvRmPrivIsDiagMode(NvRmModuleID_Invalid))
+        return;
+
+#if PMU_RAILS_NEVER_OFF
+    // Debug build option: ignore requests to power rails off
+    if (MilliVolts == ODM_VOLTAGE_OFF)
+        return;
+#endif
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+
+    // Set voltage and latch IO level sampling results
+    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
+    {
+        if (NvOdmPmuSetVoltage(s_Pmu.hOdmPmu, vddId, MilliVolts, pSettleMicroSeconds))
+            break;
+    }
+    NV_ASSERT(i < VOLTAGE_CONTROL_RETRY_CNT);
+
+    if (s_Pmu.IoPwrDetectMask || s_Pmu.NoIoPwrMask)
+    {
+        // IO rails pending power-on imply this call is not an "off" request
+        NV_ASSERT(MilliVolts != ODM_VOLTAGE_OFF);
+        if (pSettleMicroSeconds)
+        {
+            t += (*pSettleMicroSeconds);
+            *pSettleMicroSeconds = 0;   // Don't wait twice
+        }
+        NvOsWaitUS(t);
+
+        if (s_Pmu.IoPwrDetectMask)  // Latch just powered IO rails
+            NvRmPrivIoPowerDetectLatch(hDevice);
+        if (s_Pmu.NoIoPwrMask)      // Enable just powered IO rails
+            NvRmPrivIoPowerControl(hDevice, s_Pmu.NoIoPwrMask, NV_TRUE);
+        s_Pmu.IoPwrDetectMask = s_Pmu.NoIoPwrMask = 0;
+    }
+    NvOsMutexUnlock(s_Pmu.hMutex);
+}
+
+/*
+ * Records the requested SoC rail power state, updating the PMU object's IO
+ * power-detect and unpowered-IO masks that a subsequent NvRmPmuSetVoltage
+ * call consumes when it latches/enables IO rails.
+ */
+void
+NvRmPmuSetSocRailPowerState(
+    NvRmDeviceHandle hDevice,
+    NvU32 vddId,
+    NvBool Enable)
+{
+    NV_ASSERT(hDevice);
+    NvRmPrivSetSocRailPowerState(
+        hDevice, vddId, Enable, &s_Pmu.IoPwrDetectMask, &s_Pmu.NoIoPwrMask);
+}
+
+/*****************************************************************************/
+
+/*
+ * Sets the charging current limit for the given charging path via the ODM
+ * PMU, retrying up to VOLTAGE_CONTROL_RETRY_CNT times; failure of all
+ * retries only asserts in debug builds.
+ */
+void NvRmPmuSetChargingCurrentLimit(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuChargingPath ChargingPath,
+    NvU32 ChargingCurrentLimitMa,
+    NvU32 ChargerType)
+{
+    NvU32 i;
+
+    if (!s_PmuSupportedEnv)
+        return;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
+    {
+        if (NvOdmPmuSetChargingCurrent(
+            s_Pmu.hOdmPmu, (NvOdmPmuChargingPath)ChargingPath,
+            ChargingCurrentLimitMa, ChargerType))
+            break;
+    }
+    NV_ASSERT(i < VOLTAGE_CONTROL_RETRY_CNT);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+}
+
+/*****************************************************************************/
+
+/*
+ * Queries AC line (charger) status from the ODM PMU under the PMU mutex.
+ * Returns NV_FALSE when the PMU is not supported or the ODM query fails.
+ */
+NvBool NvRmPmuGetAcLineStatus(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuAcLineStatus * pStatus )
+{
+    NvBool ReturnStatus = NV_FALSE;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    ReturnStatus =
+        NvOdmPmuGetAcLineStatus(s_Pmu.hOdmPmu, (NvOdmPmuAcLineStatus*)pStatus);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return ReturnStatus;
+}
+
+/*
+ * Queries battery status bits for the given battery instance from the ODM
+ * PMU under the PMU mutex. Returns NV_FALSE when the PMU is not supported
+ * or the ODM query fails.
+ */
+ NvBool NvRmPmuGetBatteryStatus(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuBatteryInstance batteryInst,
+    NvU8 * pStatus )
+{
+    NvBool ReturnStatus = NV_FALSE;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    ReturnStatus = NvOdmPmuGetBatteryStatus(
+        s_Pmu.hOdmPmu, (NvOdmPmuBatteryInstance)batteryInst, pStatus);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return ReturnStatus;
+}
+
+/*****************************************************************************/
+
+/*
+ * Reads the full battery data record for the given battery instance from
+ * the ODM PMU and copies it field-by-field into the RM structure.
+ * Returns NV_TRUE on success; NV_FALSE (leaving *pData untouched) when the
+ * PMU is not supported or the ODM query fails.
+ */
+NvBool NvRmPmuGetBatteryData(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuBatteryInstance batteryInst,
+    NvRmPmuBatteryData * pData )
+{
+    NvOdmPmuBatteryData BatteryData;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    if (NvOdmPmuGetBatteryData(
+        s_Pmu.hOdmPmu, (NvOdmPmuBatteryInstance)batteryInst, &BatteryData))
+    {
+        // Translate ODM battery data to the RM structure
+        pData->batteryAverageCurrent = BatteryData.batteryAverageCurrent;
+        pData->batteryAverageInterval = BatteryData.batteryAverageInterval;
+        pData->batteryCurrent = BatteryData.batteryCurrent;
+        pData->batteryLifePercent = BatteryData.batteryLifePercent;
+        pData->batteryLifeTime = BatteryData.batteryLifeTime;
+        pData->batteryMahConsumed = BatteryData.batteryMahConsumed;
+        pData->batteryTemperature = BatteryData.batteryTemperature;
+        pData->batteryVoltage = BatteryData.batteryVoltage;
+        NvOsMutexUnlock(s_Pmu.hMutex);
+        return NV_TRUE;
+    }
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return NV_FALSE;
+}
+
+/*
+ * Queries the battery's full-charge lifetime from the ODM PMU under the
+ * PMU mutex; no-op when the PMU is not supported.
+ */
+void NvRmPmuGetBatteryFullLifeTime(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuBatteryInstance batteryInst,
+    NvU32 * pLifeTime )
+{
+    if (!s_PmuSupportedEnv)
+        return;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    NvOdmPmuGetBatteryFullLifeTime(
+        s_Pmu.hOdmPmu,(NvOdmPmuBatteryInstance)batteryInst, pLifeTime);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+}
+
+/*
+ * Queries the battery chemistry type from the ODM PMU under the PMU mutex;
+ * no-op when the PMU is not supported.
+ */
+void NvRmPmuGetBatteryChemistry(
+    NvRmDeviceHandle hRmDevice,
+    NvRmPmuBatteryInstance batteryInst,
+    NvRmPmuBatteryChemistry * pChemistry )
+{
+    if (!s_PmuSupportedEnv)
+        return;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    NvOdmPmuGetBatteryChemistry(s_Pmu.hOdmPmu,
+                                (NvOdmPmuBatteryInstance)batteryInst,
+                                (NvOdmPmuBatteryChemistry*)pChemistry);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+}
+
+/*****************************************************************************/
+
+/*
+ * Reads the PMU RTC counter under the PMU mutex. Returns NV_FALSE when the
+ * PMU is not supported or the ODM read fails.
+ */
+NvBool
+NvRmPmuReadRtc(
+    NvRmDeviceHandle hRmDevice,
+    NvU32* pCount)
+{
+    NvBool ReturnStatus = NV_FALSE;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    ReturnStatus = NvOdmPmuReadRtc(s_Pmu.hOdmPmu, pCount);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return ReturnStatus;
+}
+
+/*
+ * Writes the PMU RTC counter under the PMU mutex. Returns NV_FALSE when the
+ * PMU is not supported or the ODM write fails.
+ */
+NvBool
+NvRmPmuWriteRtc(
+    NvRmDeviceHandle hRmDevice,
+    NvU32 Count)
+{
+    NvBool ReturnStatus = NV_FALSE;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    ReturnStatus = NvOdmPmuWriteRtc(s_Pmu.hOdmPmu, Count);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return ReturnStatus;
+}
+
+/*
+ * Queries whether the PMU RTC has been initialized, under the PMU mutex.
+ * Returns NV_FALSE when the PMU is not supported.
+ */
+NvBool
+NvRmPmuIsRtcInitialized(
+    NvRmDeviceHandle hRmDevice)
+{
+    NvBool ReturnStatus = NV_FALSE;
+
+    if (!s_PmuSupportedEnv)
+        return NV_FALSE;
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    ReturnStatus = NvOdmPmuIsRtcInitialized(s_Pmu.hOdmPmu);
+    NvOsMutexUnlock(s_Pmu.hMutex);
+    return ReturnStatus;
+}
+
+/*****************************************************************************/
+
+/*
+ * Diagnostic-mode variant of NvRmPmuSetVoltage: sets the rail voltage with
+ * retries but performs no IO rail latching/enabling. Must only be called in
+ * diagnostic mode (asserted). Returns NV_TRUE if the ODM call succeeded
+ * within the retry budget.
+ */
+NvBool
+NvRmPrivDiagPmuSetVoltage(
+    NvRmDeviceHandle hDevice,
+    NvU32 vddId,
+    NvU32 MilliVolts,
+    NvU32 * pSettleMicroSeconds)
+{
+    NvU32 i;
+
+    if (pSettleMicroSeconds)
+        *pSettleMicroSeconds = 0;
+
+    NV_ASSERT(s_PmuSupportedEnv);
+    NV_ASSERT(NvRmPrivIsDiagMode(NvRmModuleID_Invalid));
+
+    NV_ASSERT(s_Pmu.hMutex);
+    NvOsMutexLock(s_Pmu.hMutex);
+    for (i = 0; i < VOLTAGE_CONTROL_RETRY_CNT; i++)
+    {
+        if (NvOdmPmuSetVoltage(s_Pmu.hOdmPmu, vddId, MilliVolts, pSettleMicroSeconds))
+            break;
+    }
+    NvOsMutexUnlock(s_Pmu.hMutex);
+
+    // NV_TRUE iff the loop exited via break (i.e. an ODM call succeeded)
+    return (i < VOLTAGE_CONTROL_RETRY_CNT);
+}
+
+/*****************************************************************************/
+
+/*
+ * Turns an NV-reserved PMU rail (identified by its ODM peripheral GUID) on
+ * or off. On "on" the rail is set to its ODM-requested nominal voltage; in
+ * both cases the function waits for the settling time reported by
+ * NvRmPmuSetVoltage before returning.
+ */
+void
+NvRmPrivPmuRailControl(
+    NvRmDeviceHandle hRmDevice,
+    NvU64 NvRailId,
+    NvBool TurnOn)
+{
+    NvU32 RailAddress, TimeUs;
+    const NvOdmPeripheralConnectivity* pPmuRail;
+    // Pre-zeroed so requestMilliVolts is defined even if GetCapabilities
+    // returns early in an unsupported environment
+    NvRmPmuVddRailCapabilities RailCapabilities = {0};
+
+    if (!s_PmuSupportedEnv)
+        return;
+
+    pPmuRail = NvOdmPeripheralGetGuid(NvRailId);
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(pPmuRail);
+    NV_ASSERT(pPmuRail->NumAddress);
+
+    // First address entry holds the PMU rail address (vddId)
+    RailAddress = pPmuRail->AddressList[0].Address;
+    if (TurnOn)
+    {
+        NvRmPmuGetCapabilities(hRmDevice, RailAddress, &RailCapabilities);
+        NvRmPmuSetVoltage(hRmDevice, RailAddress,
+                          RailCapabilities.requestMilliVolts, &TimeUs);
+    }
+    else
+    {
+        NvRmPmuSetVoltage(
+            hRmDevice, RailAddress, ODM_VOLTAGE_OFF, &TimeUs);
+    }
+    // TimeUs was initialized to 0 by NvRmPmuSetVoltage before any early out
+    NvOsWaitUS(TimeUs);
+}
+
+/*
+ * Returns the current voltage (in mV) of an NV-reserved PMU rail identified
+ * by its ODM peripheral GUID, or NVRM_NO_PMU_DEFAULT_VOLTAGE when the PMU
+ * is not supported in this environment.
+ */
+NvU32
+NvRmPrivPmuRailGetVoltage(
+    NvRmDeviceHandle hRmDevice,
+    NvU64 NvRailId)
+{
+    NvU32 RailAddress;
+    const NvOdmPeripheralConnectivity* pPmuRail;
+    NvU32 MilliVolts = NVRM_NO_PMU_DEFAULT_VOLTAGE;
+
+    if (s_PmuSupportedEnv)
+    {
+        pPmuRail = NvOdmPeripheralGetGuid(NvRailId);
+
+        NV_ASSERT(hRmDevice);
+        NV_ASSERT(pPmuRail);
+        NV_ASSERT(pPmuRail->NumAddress);
+
+        // First address entry holds the PMU rail address (vddId)
+        RailAddress = pPmuRail->AddressList[0].Address;
+        NvRmPmuGetVoltage(hRmDevice, RailAddress, &MilliVolts);
+    }
+    return MilliVolts;
+}
+
+/*****************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu_private.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu_private.h
new file mode 100644
index 000000000000..8884a1157c57
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_pmu_private.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_PMU_PRIVATE_H
+#define INCLUDED_NVRM_PMU_PRIVATE_H
+
+#include "nvodm_query.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+
+// Default voltage returned in environment with no PMU support
+#define NVRM_NO_PMU_DEFAULT_VOLTAGE (1)
+
+/**
+ * Initializes RM PMU interface handle
+ *
+ * @param hRmDevice The RM device handle
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure
+ */
+NvError
+NvRmPrivPmuInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Enables PMU interrupt.
+ *
+ * @param hRmDevice The RM device handle
+ */
+void NvRmPrivPmuInterruptEnable(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Masks/unmasks the PMU interrupt.
+ *
+ * @param hRmDevice The RM device handle
+ * @param mask Set NV_TRUE to mask, and NV_FALSE to unmask the PMU interrupt
+ */
+void NvRmPrivPmuInterruptMask(NvRmDeviceHandle hRmDevice, NvBool mask);
+
+/**
+ * Deinitializes RM PMU interface
+ *
+ * @param hRmDevice The RM device handle
+ */
+void
+NvRmPrivPmuDeinit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Sets new voltage level for the specified PMU voltage rail.
+ * Private interface for diagnostic mode only.
+ *
+ * @param hDevice The Rm device handle.
+ * @param vddId The ODM-defined PMU rail ID.
+ * @param MilliVolts The new voltage level to be set in millivolts (mV).
+ * Set to ODM_VOLTAGE_OFF to turn off the target voltage.
+ * @param pSettleMicroSeconds A pointer to the settling time in microseconds (uS),
+ * which is the time for supply voltage to settle after this function
+ * returns; this may or may not include PMU control interface transaction time,
+ * depending on the ODM implementation. If null this parameter is ignored.
+ *
+ * @return NV_TRUE if successful, or NV_FALSE otherwise.
+ */
+NvBool
+NvRmPrivDiagPmuSetVoltage(
+ NvRmDeviceHandle hDevice,
+ NvU32 vddId,
+ NvU32 MilliVolts,
+ NvU32 * pSettleMicroSeconds);
+
+/**
+ * Turns PMU rail On/Off
+ *
+ * @param hRmDevice The RM device handle
+ * @param NvRailId The reserved NV rail GUID
+ * @param TurnOn Turn rail ON if True, or turn rail Off if False
+ */
+void
+NvRmPrivPmuRailControl(
+ NvRmDeviceHandle hRmDevice,
+ NvU64 NvRailId,
+ NvBool TurnOn);
+
+/**
+ * Gets PMU rail voltage
+ *
+ * @param hRmDevice The RM device handle
+ * @param NvRailId The reserved NV rail GUID
+ *
+ * @return PMU rail voltage in mv
+ */
+NvU32
+NvRmPrivPmuRailGetVoltage(
+ NvRmDeviceHandle hRmDevice,
+ NvU64 NvRailId);
+
+// Forward declarations for all chip-specific helper functions
+
+/**
+ * Sets polarity of dedicated SoC PMU interrupt input
+ *
+ * @param hRmDevice The RM device handle
+ * @param Polarity PMU interrupt polarity to be set
+ */
+void
+NvRmPrivAp20SetPmuIrqPolarity(
+ NvRmDeviceHandle hRmDevice,
+ NvOdmInterruptPolarity Polarity);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_PMU_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c
new file mode 100644
index 000000000000..480fe9482185
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power.c
@@ -0,0 +1,1531 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: Implements the interface of the NvRM Power.
+ *
+ */
+
+#include "nvrm_power_private.h"
+#include "nvrm_pmu_private.h"
+#include "ap15/ap15rm_private.h"
+#include "ap15/project_relocation_table.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "ap15/arapbpm.h"
+#include "nvrm_clocks.h"
+#include "nvodm_query.h"
+
+// TODO: Always Disable before check-in
+// Module debug: 0=disable, 1=enable
+#define NVRM_POWER_ENABLE_PRINTF (0)
+
+// TODO: Always Disable before check-in
+// Report every change in RM clients power state: 0=disable, 1=enable
+#define NVRM_POWER_VERBOSE_PRINTF (0)
+
+#if NVRM_POWER_ENABLE_PRINTF || NVRM_POWER_VERBOSE_PRINTF
+#define NVRM_POWER_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_POWER_PRINTF(x)
+#endif
+
+// Active modules report on suspend entry : 0=disable, 1=enable
+#define NVRM_POWER_DEBUG_SUSPEND_ENTRY (1)
+
+/*****************************************************************************/
+
+// Specifies initial registry size as well as delta for dynamic size change
+#define NVRM_POWER_REGISTRY_DELTA (NvRmPrivModuleID_Num)
+
+/*
+ * Convert registry index to client ID and vice versa: just use
+ * provided mask as high bits combined with index in low bits
+ * (index is expected to not exceed 16 bits ever)
+ */
+#define NVRM_POWER_INDEX2ID(index, mask) (((mask) << 16) | (index))
+#define NVRM_POWER_ID2INDEX(id) ((id) & 0xFFFF)
+
+
+/*
+ * Holds power client voltage request information for a
+ * particular module (one node per module in a client's request list)
+ */
+typedef struct ModuleVoltageReqRec
+{
+    // Target module (combined ID and instance)
+    NvRmModuleID ModuleId;
+
+    // Power group number module belongs to
+    NvU32 PowerGroup;
+
+    // Module power cycle indicator
+    NvBool PowerCycled;
+
+    // Requested voltage range; MaxVolts == NvRmVoltsOff marks an
+    // inactive (cancelled) request
+    NvRmMilliVolts MinVolts;
+    NvRmMilliVolts MaxVolts;
+
+    // Pointer to the next module info node
+    struct ModuleVoltageReqRec* pNext;
+} ModuleVoltageReq;
+
+/*
+ * Holds power client clock request information for a
+ * particular module (placeholder - members not yet defined)
+ */
+typedef struct ModuleClockReqRec
+{
+    // TODO: Define clock request information members
+
+    // Pointer to the next module info node
+    struct ModuleClockReqRec* pNext;
+} ModuleClockReq;
+
+/*
+ * Holds power client busy hint information for a
+ * particular clock domain (node in the per-domain s_BusyReqHeads list)
+ */
+typedef struct BusyHintReqRec
+{
+    // Requested busy pulse mode
+    NvBool BusyPulseMode;
+
+    // Requested frequency boost in KHz
+    NvRmFreqKHz BoostKHz;
+
+    // Requested boost interval in ms
+    NvU32 IntervalMs;
+
+    // Boost start time in ms
+    NvU32 StartTimeMs;
+
+    // Id of the requester (power client registration ID)
+    NvU32 ClientId;
+
+    // Pointer to the next busy hint node
+    struct BusyHintReqRec* pNext;
+} BusyHintReq;
+
+/*
+ * Combines voltage and clock requests, starvation and busy hints,
+ * as well as recorded power events for a particular client
+ */
+typedef struct NvRmPowerClientRec
+{
+    // Client registration ID (encodes registry index + mask,
+    // see NVRM_POWER_INDEX2ID)
+    NvU32 id;
+
+    // Client semaphore for power management event signaling
+    NvOsSemaphoreHandle hEventSemaphore;
+
+    // Last detected power management event
+    NvRmPowerEvent Event;
+
+    // Pointer to the array of starvation hints (indexed by DFS clock ID)
+    NvBool* pStarvationHints;
+
+    // Head pointer to client voltage request list
+    ModuleVoltageReq* pVoltageReqHead;
+
+    // Head pointer to client clock request list
+    ModuleClockReq* pClockReqHead;
+
+    // Client 4-character tag
+    NvU32 tag;
+} NvRmPowerClient;
+
+/*
+ * Combines information on power clients registered
+ * with RM
+ */
+typedef struct NvRmPowerRegistryRec
+{
+    // Array of pointers to power client records
+    NvRmPowerClient** pPowerClients;
+
+    // Used index range (max used entry index + 1)
+    NvU32 UsedIndexRange;
+
+    // Total number of available entries (array size)
+    NvU32 AvailableEntries;
+} NvRmPowerRegistry;
+
+// RM power clients registry
+static NvRmPowerRegistry s_PowerRegistry;
+
+// Mutex for thread-safe access to RM power clients records
+static NvOsMutexHandle s_hPowerClientMutex = NULL;
+
+// "Power On" request reference count for each SoC Power Group. Appended
+// at the end is a duplicate entry for NPG group that represents power
+// requirements for autonomous h/w operations with no s/w activity
+static NvU32 s_PowerOnRefCounts[NV_POWERGROUP_MAX + 1];
+#define NVRM_POWERGROUP_NPG_AUTO (NV_POWERGROUP_MAX)
+
+// Active starvation hints reference count for each DFS clock domain
+static NvU32 s_StarveOnRefCounts[NvRmDfsClockId_Num];
+
+// Heads of busy hint lists for DFS clock domain
+static BusyHintReq s_BusyReqHeads[NvRmDfsClockId_Num];
+
+
+/*****************************************************************************/
+
+/*
+ * Release memory and system resources allocated for the specified power client
+ */
+static void FreePowerClient(NvRmPowerClient* pPowerClient);
+
+/*
+ * Cancel all requests issued by the specified power client
+ */
+static void CancelPowerRequests(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPowerClient* pPowerClient);
+
+/*
+ * Notifies RM Clients about power management event
+ */
+static void
+PowerEventNotify(NvRmDeviceHandle hRmDeviceHandle, NvRmPowerEvent Event);
+
+/*
+ * Records power cycle for all RM clients in the specified group
+ */
+static void
+RecordPowerCycle(NvRmDeviceHandle hRmDeviceHandle, NvU32 PowerGroup);
+
+/*
+ * Reports combined RM clients power state to OS adaptation layer
+ * (chip-aware implementation)
+ */
+static void
+ReportRmPowerState(NvRmDeviceHandle hRmDeviceHandle);
+
+/*
+ * Cancels busy hints reported by the specified client for
+ * specified domain
+ */
+static void
+CancelBusyHints(NvRmDfsClockId ClockId, NvU32 ClientId);
+
+/*
+ * Records starvation hints reported against DFS domains by
+ * the specified client
+ */
+static NvError
+RecordStarvationHints(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPowerClient* pPowerClient,
+ const NvRmDfsStarvationHint* pMultiHint,
+ NvU32 NumHints);
+
+/*
+ * Records busy hints reported against DFS domains by
+ * the specified client
+ */
+static NvError
+RecordBusyHints(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 ClientId,
+ const NvRmDfsBusyHint* pMultiHint,
+ NvU32 NumHints,
+ NvBool* pSignalDfs);
+
+/* Send a simple message to the AVP indicating that it needs
+ * to save state in preparation for LP0 (explicit case)
+ */
+NvError
+NvRmPrivSendAVPIdleMessage( NvRmDeviceHandle hRmDeviceHandle );
+
+/*****************************************************************************/
+/*
+ * Releases all memory and OS resources owned by a power client record:
+ * voltage and clock request lists, the starvation hint array, the event
+ * semaphore, and the record itself. NULL-safe.
+ */
+static void FreePowerClient(NvRmPowerClient* pPowerClient)
+{
+    ModuleVoltageReq* pVoltageReq = NULL;
+    ModuleClockReq* pClockReq = NULL;
+
+    // Just return if null-pointer
+    if (pPowerClient == NULL)
+        return;
+
+    // Free memory occupied by voltage requests
+    while (pPowerClient->pVoltageReqHead != NULL)
+    {
+        pVoltageReq = pPowerClient->pVoltageReqHead;
+        pPowerClient->pVoltageReqHead = pVoltageReq->pNext;
+        NvOsFree(pVoltageReq);
+    }
+
+    // Free memory occupied by clock requests
+    while (pPowerClient->pClockReqHead != NULL)
+    {
+        pClockReq = pPowerClient->pClockReqHead;
+        pPowerClient->pClockReqHead = pClockReq->pNext;
+        NvOsFree(pClockReq);
+    }
+
+    // Free memory occupied by starvation hints array
+    NvOsFree(pPowerClient->pStarvationHints);
+
+    // Free power management event semaphore handle
+    NvOsSemaphoreDestroy(pPowerClient->hEventSemaphore);
+
+    // Free memory occupied by the client record
+    NvOsFree(pPowerClient);
+}
+
+/*
+ * Cancels all outstanding requests of a power client: releases its power-on
+ * references (powering down groups whose count drops to zero), clears its
+ * starvation hints (dropping the implied EMC references for CPU/AVP/VDE
+ * domains), and removes its busy hints from every DFS domain list.
+ */
+static void CancelPowerRequests(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmPowerClient* pPowerClient)
+{
+    NvU32 i;
+    ModuleVoltageReq* pVoltageReq = NULL;
+
+    // Cancel power On requests and update power planes as well as
+    // combined RM clients power state accordingly
+    pVoltageReq = pPowerClient->pVoltageReqHead;
+    while (pVoltageReq != NULL)
+    {
+        if (pVoltageReq->MaxVolts != NvRmVoltsOff)
+        {
+            NvU32 PowerGroup = pVoltageReq->PowerGroup;
+            // Mark the request inactive before dropping the reference
+            pVoltageReq->MaxVolts = NvRmVoltsOff;
+
+            NV_ASSERT(s_PowerOnRefCounts[PowerGroup] != 0);
+            s_PowerOnRefCounts[PowerGroup]--;
+            if (s_PowerOnRefCounts[PowerGroup] == 0)
+            {
+                // Last reference gone - gate the power group
+                NvRmPrivPowerGroupControl(hRmDeviceHandle, PowerGroup,
+                                          NV_FALSE);
+                ReportRmPowerState(hRmDeviceHandle);
+            }
+        }
+        pVoltageReq = pVoltageReq->pNext;
+    }
+    // Cancel starvation hints
+    if (pPowerClient->pStarvationHints != NULL)
+    {
+        for (i = 0; i < NvRmDfsClockId_Num; i++)
+        {
+            if (pPowerClient->pStarvationHints[i])
+            {
+                pPowerClient->pStarvationHints[i] = NV_FALSE;
+                // CPU/AVP/VDE starvation carries an implied EMC reference
+                if ((i == NvRmDfsClockId_Cpu) ||
+                    (i == NvRmDfsClockId_Avp) ||
+                    (i == NvRmDfsClockId_Vpipe))
+                {
+                    NV_ASSERT(s_StarveOnRefCounts[NvRmDfsClockId_Emc] != 0);
+                    s_StarveOnRefCounts[NvRmDfsClockId_Emc]--;
+                }
+                NV_ASSERT(s_StarveOnRefCounts[i] != 0);
+                s_StarveOnRefCounts[i]--;
+            }
+        }
+    }
+
+    // Cancel busy hints
+    for (i = 0; i < NvRmDfsClockId_Num; i++)
+    {
+        CancelBusyHints(i, pPowerClient->id);
+    }
+
+    // TODO: Cancel clock requests issued by the client
+}
+
+/*****************************************************************************/
+/*
+ * Initializes the RM power manager: resets the client registry and all
+ * reference counters, creates the registry mutex, initializes the RM/OAL
+ * interface, and power gates SoC partitions via power group init. On
+ * failure the OAL interface and mutex are torn down before returning.
+ */
+NvError NvRmPrivPowerInit(NvRmDeviceHandle hRmDeviceHandle)
+{
+    NvError e;
+
+    NV_ASSERT(hRmDeviceHandle);
+
+    // TODO: expand after clock API is completed
+
+    // Initialize registry
+    s_PowerRegistry.pPowerClients = NULL;
+    s_PowerRegistry.AvailableEntries = 0;
+    s_PowerRegistry.UsedIndexRange = 0;
+
+    // Clear busy head pointers as well as starvation and power plane
+    // reference counts. Although power plane references are cleared
+    // here, the combined power state is not updated - it will kept as
+    // set by the boot code, until the 1st client requests power.
+    NvOsMemset(s_BusyReqHeads, 0, sizeof(s_BusyReqHeads));
+    NvOsMemset(s_StarveOnRefCounts, 0, sizeof(s_StarveOnRefCounts));
+    NvOsMemset(s_PowerOnRefCounts, 0, sizeof(s_PowerOnRefCounts));
+
+    // Create the RM registry mutex and initialize RM/OAL interface
+    s_hPowerClientMutex = NULL;
+    NV_CHECK_ERROR_CLEANUP(NvOsMutexCreate(&s_hPowerClientMutex));
+    NV_CHECK_ERROR_CLEANUP(NvRmPrivOalIntfInit(hRmDeviceHandle));
+
+    // Initialize power group control, and power gate SoC partitions
+    NvRmPrivPowerGroupControlInit(hRmDeviceHandle);
+    return NvSuccess;
+
+fail:
+    NvRmPrivOalIntfDeinit(hRmDeviceHandle);
+    NvOsMutexDestroy(s_hPowerClientMutex);
+    s_hPowerClientMutex = NULL;
+    return e;
+}
+
+
+/*
+ * Tears down the RM power manager: frees all queued busy hints, all
+ * registered client records and the registry array, then destroys the
+ * registry mutex and the RM/OAL interface. Inverse of NvRmPrivPowerInit.
+ */
+void NvRmPrivPowerDeinit(NvRmDeviceHandle hRmDeviceHandle)
+{
+    NvU32 i;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+
+    NV_ASSERT(hRmDeviceHandle);
+
+    // TODO: expand after clock API is completed
+
+    // Free busy hint lists for DFS clock domains
+    // (heads are static storage; only the chained nodes are heap-allocated)
+    for (i = 0; i < NvRmDfsClockId_Num; i++)
+    {
+        while (s_BusyReqHeads[i].pNext != NULL)
+        {
+            BusyHintReq* pBusyHintReq = s_BusyReqHeads[i].pNext;
+            s_BusyReqHeads[i].pNext = pBusyHintReq->pNext;
+            NvOsFree(pBusyHintReq);
+        }
+    }
+    // Free RM power registry memory
+    // (NULL entries mark free slots; FreePowerClient is assumed NULL-safe
+    //  -- TODO confirm against its definition)
+    for (i = 0; i < pRegistry->UsedIndexRange; i++)
+    {
+        FreePowerClient(pRegistry->pPowerClients[i]);
+    }
+    NvOsFree(pRegistry->pPowerClients);
+    pRegistry->pPowerClients = NULL;
+    pRegistry->AvailableEntries = 0;
+    pRegistry->UsedIndexRange = 0;
+
+    // Destroy RM registry mutex and free RM/OAL interface resources
+    NvRmPrivOalIntfDeinit(hRmDeviceHandle);
+    NvOsMutexDestroy(s_hPowerClientMutex);
+    s_hPowerClientMutex = NULL;
+}
+
+/*****************************************************************************/
+
+/*
+ * Registers a new RM power client.
+ *
+ * hEventSemaphore - optional client event semaphore; cloned so it remains
+ *                   valid after the call (may be NULL)
+ * pClientId       - in: caller tag stored verbatim in the client record;
+ *                   out: opaque registration ID used by all other power APIs
+ *
+ * Returns NvSuccess, the semaphore clone error, or
+ * NvError_InsufficientMemory when registry/client allocation fails.
+ */
+NvError
+NvRmPowerRegister(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvOsSemaphoreHandle hEventSemaphore,
+    NvU32* pClientId)
+{
+    NvU32 FreeIndex;
+    NvError error;
+    NvOsSemaphoreHandle hSema = NULL;
+    NvRmPowerClient* pNewClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pClientId);
+
+    // If non-zero semaphore handle is passed, duplicate it to be available
+    // after the call. Abort registration if the handle cannot be cloned.
+    // (Previously the failure was only asserted, so release builds carried
+    // on with a NULL semaphore and still reported success.)
+    if (hEventSemaphore != NULL)
+    {
+        error = NvOsSemaphoreClone(hEventSemaphore, &hSema);
+        if (error != NvSuccess)
+        {
+            NV_ASSERT(!" Power Register Semaphore Clone error. ");
+            return error;
+        }
+    }
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Find free registry entry for the new client
+    for (FreeIndex = 0; FreeIndex < pRegistry->UsedIndexRange; FreeIndex++)
+    {
+        if (pRegistry->pPowerClients[FreeIndex] == NULL)
+            break;
+    }
+    if (FreeIndex == pRegistry->AvailableEntries)
+    {
+        // If all available entries are used, re-size registry array
+        NvU32 entries = pRegistry->AvailableEntries +
+            NVRM_POWER_REGISTRY_DELTA;
+        size_t s = sizeof(*pRegistry->pPowerClients) * (size_t)entries;
+        NvRmPowerClient** p = NvOsRealloc(pRegistry->pPowerClients, s);
+        if (p == NULL)
+        {
+            NvU32 old_size;
+
+            /* fall back to NvOsAlloc */
+            p = NvOsAlloc( s );
+            if( p == NULL )
+            {
+                goto failed;
+            }
+
+            /* copy the old data, free, etc, */
+            old_size = sizeof(*pRegistry->pPowerClients) *
+                pRegistry->AvailableEntries;
+
+            NvOsMemcpy( p, pRegistry->pPowerClients, old_size );
+            NvOsFree( pRegistry->pPowerClients );
+        }
+        pRegistry->pPowerClients = p;
+        pRegistry->AvailableEntries = entries;
+    }
+    if (FreeIndex == pRegistry->UsedIndexRange)
+    {
+        // If reached used index range boundary, advance it
+        pRegistry->UsedIndexRange++;
+    }
+
+    // Allocate and store new client record pointer in registry (null-pointer
+    // marks registry entry as free, so it's OK to store it before error check)
+    pNewClient = NvOsAlloc(sizeof(*pNewClient));
+    pRegistry->pPowerClients[FreeIndex] = pNewClient;
+    if (pNewClient == NULL)
+    {
+        goto failed;
+    }
+
+    // Fill in new client entry
+    pNewClient->hEventSemaphore = hSema;
+    pNewClient->Event = NvRmPowerEvent_NoEvent;
+    pNewClient->pVoltageReqHead = NULL;
+    pNewClient->pClockReqHead = NULL;
+    pNewClient->pStarvationHints = NULL;
+    pNewClient->tag = *pClientId;
+
+    /*
+     * Combine index with client pointer into registration ID returned to the
+     * client. This will make it a little bit more difficult for not-registered
+     * clients to guess/re-use IDs
+     * NOTE(review): (NvU32)pClientId truncates on 64-bit targets; harmless
+     * for ID mixing, but confirm if this code is ever built LP64.
+     */
+    pNewClient->id = NVRM_POWER_INDEX2ID(FreeIndex, (NvU32)pClientId);
+    *pClientId = pNewClient->id;
+
+    NvOsMutexUnlock(s_hPowerClientMutex);
+    return NvSuccess;
+
+failed:
+    // On allocation failure: the registry slot (if any) still holds NULL,
+    // so only the client record and the cloned semaphore need releasing
+    NvOsFree(pNewClient);
+    NvOsSemaphoreDestroy(hSema);
+    NvOsMutexUnlock(s_hPowerClientMutex);
+    return NvError_InsufficientMemory;
+}
+
+/*****************************************************************************/
+
+/*
+ * Unregisters a power client: cancels its outstanding power/starvation/busy
+ * requests, frees its record, and opportunistically shrinks the registry.
+ * Silently returns if ClientId was never registered (or already freed).
+ */
+void NvRmPowerUnRegister(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 ClientId)
+{
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);
+
+    NV_ASSERT(hRmDeviceHandle);
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Check if this ID was registered
+    if (ClientIndex < pRegistry->UsedIndexRange)
+    {
+        pPowerClient = pRegistry->pPowerClients[ClientIndex];
+    }
+    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
+    {
+        NvOsMutexUnlock(s_hPowerClientMutex);
+        return;
+    }
+
+    // Cancel power requests issued by the power client to be unregistered
+    CancelPowerRequests(hRmDeviceHandle, pPowerClient);
+
+    // Free power client memory and mark the respective registry entry as free
+    FreePowerClient(pPowerClient);
+    pRegistry->pPowerClients[ClientIndex] = NULL;
+
+    // Decrement used index range as much as possible
+    while ((pRegistry->UsedIndexRange > 0) &&
+           (pRegistry->pPowerClients[pRegistry->UsedIndexRange - 1] == NULL))
+    {
+        pRegistry->UsedIndexRange--;
+    }
+
+    // Shrink registry if too much free space (keep one delta margin)
+    if ((pRegistry->UsedIndexRange + 2 * NVRM_POWER_REGISTRY_DELTA) <=
+        pRegistry->AvailableEntries)
+    {
+        NvU32 entries = pRegistry->UsedIndexRange + NVRM_POWER_REGISTRY_DELTA;
+        size_t s = sizeof(*pRegistry->pPowerClients) * (size_t)entries;
+        NvRmPowerClient** p = NvOsRealloc(pRegistry->pPowerClients, s);
+        if (p != NULL)
+        {
+            pRegistry->pPowerClients = p;
+            pRegistry->AvailableEntries = entries;
+        }
+
+        // NOTE(review): a failed shrinking realloc leaves the original
+        // (larger) buffer intact, so ignoring the failure here is safe;
+        // the FIXME below is about retrying the shrink, not correctness.
+        // FIXME: handle NvOsRealloc failure -- try NvOsAlloc instead
+    }
+    NvOsMutexUnlock(s_hPowerClientMutex);
+}
+
+/*****************************************************************************/
+
+/*
+ * Returns the last recorded power event for the given registered client
+ * and clears it (subsequent calls report NvRmPowerEvent_NoEvent until a
+ * new event arrives). Fails with NvError_BadValue for unknown client IDs.
+ */
+NvError NvRmPowerGetEvent(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 ClientId,
+    NvRmPowerEvent* pEvent)
+{
+    NvError status = NvError_BadValue;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pEvent);
+
+    NvOsMutexLock(s_hPowerClientMutex);
+    if (ClientIndex < pRegistry->UsedIndexRange)
+    {
+        NvRmPowerClient* pClient = pRegistry->pPowerClients[ClientIndex];
+        if ((pClient != NULL) && (pClient->id == ClientId))
+        {
+            // Hand back the pending event and mark it consumed
+            *pEvent = pClient->Event;
+            pClient->Event = NvRmPowerEvent_NoEvent;
+            status = NvSuccess;
+        }
+    }
+    NvOsMutexUnlock(s_hPowerClientMutex);
+    return status;
+}
+
+/*
+ * Public entry point for broadcasting a power event to all registered
+ * clients; serializes against the registry mutex and ignores the
+ * NvRmPowerEvent_NoEvent placeholder.
+ */
+void NvRmPowerEventNotify(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmPowerEvent Event)
+{
+    NV_ASSERT(hRmDeviceHandle);
+
+    if (Event != NvRmPowerEvent_NoEvent)
+    {
+        NvOsMutexLock(s_hPowerClientMutex);
+        PowerEventNotify(hRmDeviceHandle, Event);
+        NvOsMutexUnlock(s_hPowerClientMutex);
+    }
+}
+
+/*
+ * Broadcasts a power event to every registered client (caller holds
+ * s_hPowerClientMutex). For LP0 wake: restores clocks and flags all
+ * voltage requests outside the always-on group as power cycled. Stores
+ * the event per client and signals clients that supplied a semaphore.
+ * NOTE(review): the trace printf labels any event other than WakeLP0 as
+ * "Wake from LP1" -- fine for the two wake events, misleading otherwise.
+ */
+static void
+PowerEventNotify(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmPowerEvent Event)
+{
+    NvU32 i;
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+
+    NVRM_POWER_PRINTF(("%s is reported to RM clients\n",
+        (Event == NvRmPowerEvent_WakeLP0)? "Wake from LP0" : "Wake from LP1"));
+
+    // Restore clocks after LP0
+    if (Event == NvRmPowerEvent_WakeLP0)
+        NvRmPrivClocksResume(hRmDeviceHandle);
+
+    // Store event for all registered clients, and signal only those, that
+    // have provided valid semaphore handle; on wake from low power states
+    // set power cycled indicators
+    for (i = 0; i < pRegistry->UsedIndexRange; i++)
+    {
+        pPowerClient = pRegistry->pPowerClients[i];
+        if (pPowerClient != NULL)
+        {
+            ModuleVoltageReq* pVoltageReq = pPowerClient->pVoltageReqHead;
+            while (pVoltageReq != NULL)
+            {
+                if (Event == NvRmPowerEvent_WakeLP0)
+                {
+                    //LP0: all power groups, except AO group, are powered down
+                    // when core power is down
+                    if (pVoltageReq->PowerGroup != NV_POWERGROUP_AO)
+                        pVoltageReq->PowerCycled = NV_TRUE;
+                }
+                else if (Event == NvRmPowerEvent_WakeLP1)
+                {
+                    // LP1: core power is preserved; modules in powered down
+                    // groups are tracked via RecordPowerCycle()
+                }
+                pVoltageReq = pVoltageReq->pNext;
+            }
+            pPowerClient->Event = Event;
+            if (pPowerClient->hEventSemaphore != NULL)
+            {
+                NvOsSemaphoreSignal(pPowerClient->hEventSemaphore);
+            }
+        }
+    }
+}
+
+/*
+ * Marks every recorded voltage request in the given power group as power
+ * cycled, so the owning client is told about the power loss on its next
+ * voltage control call. Caller holds s_hPowerClientMutex.
+ */
+static void
+RecordPowerCycle(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 PowerGroup)
+{
+    NvU32 index;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+
+    NVRM_POWER_PRINTF(("Power Cycled partition: %d\n", PowerGroup));
+
+    // Walk every registered client's voltage request list and flag the
+    // entries that belong to the cycled power group
+    for (index = 0; index < pRegistry->UsedIndexRange; index++)
+    {
+        NvRmPowerClient* pClient = pRegistry->pPowerClients[index];
+        ModuleVoltageReq* pReq;
+
+        if (pClient == NULL)
+            continue;
+        for (pReq = pClient->pVoltageReqHead; pReq != NULL; pReq = pReq->pNext)
+        {
+            if (pReq->PowerGroup == PowerGroup)
+            {
+                pReq->PowerCycled = NV_TRUE;
+            }
+        }
+    }
+}
+
+/*****************************************************************************/
+
+/*
+ * Recomputes the combined RM clients power state from the power-group
+ * reference counts and publishes it to the OS adaptation layer. If the
+ * previous state was a low power state, this is the first report after
+ * suspend, so wake events are broadcast to clients from here.
+ * Caller holds s_hPowerClientMutex.
+ */
+static void
+ReportRmPowerState(NvRmDeviceHandle hRmDeviceHandle)
+{
+    NvU32 i;
+    NvRmPowerState OldRmState = NvRmPrivPowerGetState(hRmDeviceHandle);
+    NvRmPowerState NewRmState = NvRmPowerState_Idle;
+
+    // RM clients are in h/w autonomous (bypass) state if there are Power On
+    // references for NPG_AUTO group only; RM clients are in active state if
+    // there are Power On references for any other group
+    // (the loop below scans indices 0..NV_POWERGROUP_MAX-1, so the
+    //  NPG_AUTO slot checked above is outside that range and Active
+    //  overrides AutoHw only for real power groups)
+    if (s_PowerOnRefCounts[NVRM_POWERGROUP_NPG_AUTO] != 0)
+        NewRmState = NvRmPowerState_AutoHw;
+
+    for (i = 0; i < NV_POWERGROUP_MAX; i++)
+    {
+        if (s_PowerOnRefCounts[i] != 0)
+        {
+            NewRmState = NvRmPowerState_Active;
+            break;
+        }
+    }
+    if (NewRmState == OldRmState)
+        return;
+
+#if NVRM_POWER_VERBOSE_PRINTF
+    NVRM_POWER_PRINTF(("RM Clients Power State: %s\n",
+        ((NewRmState == NvRmPowerState_Active) ? "Active" :
+         ((NewRmState == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle"))));
+#endif
+    /*
+     * Set new combined RM clients power state in the storage shared with the
+     * OS adaptation layer. Check the previous state; if it was any of the low
+     * power states (i.e., this is the 1st RM power state report after suspend)
+     * notify all clients about wake up event.
+     */
+    NvRmPrivPowerSetState(hRmDeviceHandle, NewRmState);
+    switch (OldRmState)
+    {
+        case NvRmPowerState_LP0:
+            NvOsDebugPrintf("*** Wakeup from LP0 ***\n");
+            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP0);
+            break;
+        case NvRmPowerState_LP1:
+            NvOsDebugPrintf("*** Wakeup from LP1 ***\n");
+            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
+            break;
+        case NvRmPowerState_SkippedLP0:
+            NvOsDebugPrintf("*** Wakeup after Skipped LP0 ***\n");
+            // resume procedure after Skipped LP0 is the same as after LP1
+            PowerEventNotify(hRmDeviceHandle, NvRmPowerEvent_WakeLP1);
+            break;
+        default:
+            break;
+    }
+}
+
+/*
+ * Reads the current combined RM clients power state under the registry
+ * lock. Always succeeds.
+ */
+NvError
+NvRmPowerGetState(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmPowerState* pState)
+{
+    NvRmPowerState CurrentState;
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pState);
+
+    // Take a consistent snapshot while holding the registry mutex
+    NvOsMutexLock(s_hPowerClientMutex);
+    CurrentState = NvRmPrivPowerGetState(hRmDeviceHandle);
+    NvOsMutexUnlock(s_hPowerClientMutex);
+
+    *pState = CurrentState;
+    return NvSuccess;
+}
+
+/*
+ * Records a module voltage (power on/off) request for a registered client
+ * and drives the module's power group accordingly via on/off reference
+ * counting. Only transitions between NvRmVoltsOff and any "on" level change
+ * the group reference count; voltage level changes while on do not.
+ * On the first request after RM power cycled the module, *pCurrentVolts is
+ * reported as NvRmVoltsCycled so the client knows to re-init its hardware.
+ */
+NvError
+NvRmPowerVoltageControl(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmModuleID ModuleId,
+    NvU32 ClientId,
+    NvRmMilliVolts MinVolts,
+    NvRmMilliVolts MaxVolts,
+    const NvRmMilliVolts* PrefVoltageList,
+    NvU32 PrefVoltageListCount,
+    NvRmMilliVolts* pCurrentVolts)
+{
+    NvError error;
+    NvU32 PowerGroup = 0;
+    NvBool PowerChanged = NV_FALSE;
+    NvRmModuleInstance *pInstance = NULL;
+    ModuleVoltageReq* pVoltageReq = NULL;
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);
+
+    /* validate the Rm Handle */
+    NV_ASSERT(hRmDeviceHandle);
+
+    // Validate module ID and get associated Power Group
+    if (ModuleId == NvRmPrivModuleID_System)
+    {
+        PowerGroup = NVRM_POWERGROUP_NPG_AUTO;
+    }
+    else
+    {
+        error = NvRmPrivGetModuleInstance(hRmDeviceHandle, ModuleId, &pInstance);
+        if (error != NvSuccess)
+        {
+            NV_ASSERT(!" Voltage control: Invalid module ID. ");
+            return NvError_ModuleNotPresent;
+        }
+        PowerGroup = pInstance->DevPowerGroup;
+        NV_ASSERT(PowerGroup < NV_POWERGROUP_MAX);
+    }
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Check if this ID was registered; return error otherwise
+    if (ClientIndex < pRegistry->UsedIndexRange)
+    {
+        pPowerClient = pRegistry->pPowerClients[ClientIndex];
+    }
+    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
+    {
+        NvOsMutexUnlock(s_hPowerClientMutex);
+        return NvError_BadValue;
+    }
+
+    // Search for the previously recorded voltage request for this module
+    pVoltageReq = pPowerClient->pVoltageReqHead;
+    while ((pVoltageReq != NULL) && (pVoltageReq->ModuleId != ModuleId))
+    {
+        pVoltageReq = pVoltageReq->pNext;
+    }
+
+    // If it is a new voltage request record, allocate and fill it in,
+    // otherwise just update power status. In both cases determine if
+    // power requirements for the module have changed.
+    if (pVoltageReq == NULL)
+    {
+        pVoltageReq = NvOsAlloc(sizeof(*pVoltageReq));
+        if (pVoltageReq == NULL)
+        {
+            NvOsMutexUnlock(s_hPowerClientMutex);
+            return NvError_InsufficientMemory;
+        }
+        // Link at head
+        pVoltageReq->pNext = pPowerClient->pVoltageReqHead;
+        pPowerClient->pVoltageReqHead = pVoltageReq;
+        pVoltageReq->ModuleId = ModuleId;
+        pVoltageReq->PowerGroup = PowerGroup;
+        pVoltageReq->PowerCycled = NV_FALSE;
+
+        // Only new power On request counts as change
+        PowerChanged = (MaxVolts != NvRmVoltsOff);
+    }
+    else
+    {
+        // Only changes from On to Off or vice versa counts
+        PowerChanged = (pVoltageReq->MaxVolts != MaxVolts) &&
+                       ((pVoltageReq->MaxVolts == NvRmVoltsOff) ||
+                        (MaxVolts == NvRmVoltsOff));
+    }
+    // Record new power request voltages
+    pVoltageReq->MinVolts = MinVolts;
+    pVoltageReq->MaxVolts = MaxVolts;
+
+    // If module power requirements have changed, update power group reference
+    // count, and execute the respective h/w power control procedure
+    if (PowerChanged)
+    {
+        if (MaxVolts != NvRmVoltsOff)
+        {
+            s_PowerOnRefCounts[PowerGroup]++;
+            if (s_PowerOnRefCounts[PowerGroup] == 1)
+            {
+                // First On reference: un-gate the group if it is really off
+                NvRmMilliVolts v =
+                    NvRmPrivPowerGroupGetVoltage(hRmDeviceHandle, PowerGroup);
+                if (v == NvRmVoltsOff)
+                {
+                    RecordPowerCycle(hRmDeviceHandle, PowerGroup);
+                    NvRmPrivPowerGroupControl(hRmDeviceHandle, PowerGroup, NV_TRUE);
+                }
+            }
+        }
+        else
+        {
+            // NOTE(review): in release builds (NV_ASSERT compiled out) an
+            // unbalanced Off request would decrement a zero count; the
+            // printf below only logs the imbalance, it does not prevent it
+            NV_ASSERT(s_PowerOnRefCounts[PowerGroup] != 0);
+            if (s_PowerOnRefCounts[PowerGroup] == 0)
+            {
+                NVRM_POWER_PRINTF(("Power balance failed: module %d\n", ModuleId));
+            }
+            s_PowerOnRefCounts[PowerGroup]--;
+            if (s_PowerOnRefCounts[PowerGroup] == 0)
+            {
+                NvRmPrivPowerGroupControl(hRmDeviceHandle, PowerGroup, NV_FALSE);
+            }
+        }
+    }
+    // ReportRmPowerState early-returns when the combined state is unchanged
+    ReportRmPowerState(hRmDeviceHandle);
+
+    // Return current voltage, unless this is the first request after module
+    // was power cycled by RM; in the latter case return NvRmVoltsCycled value
+    if (pCurrentVolts != NULL)
+    {
+        *pCurrentVolts = NvRmPrivPowerGroupGetVoltage(hRmDeviceHandle, PowerGroup);
+        if (pVoltageReq->PowerCycled && (*pCurrentVolts != NvRmVoltsOff))
+        {
+            *pCurrentVolts = NvRmVoltsCycled;
+        }
+    }
+    // In any case clear power cycled indicator
+    pVoltageReq->PowerCycled = NV_FALSE;
+
+    NvOsMutexUnlock(s_hPowerClientMutex);
+    return NvSuccess;
+}
+
+/*
+ * Enumerates all modules with recorded voltage requests across every
+ * registered client. On input *pListSize is the capacity of pIdList /
+ * pActiveList (0 = query only); on output it is the number of modules
+ * found (unchanged if the caller's buffer was smaller). A module is
+ * "active" when its recorded MaxVolts is not NvRmVoltsOff.
+ */
+void
+NvRmListPowerAwareModules(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32* pListSize,
+    NvRmModuleID* pIdList,
+    NvBool* pActiveList)
+{
+    NvBool active;
+    NvU32 i, ModulesNum, ActiveNum;
+    ModuleVoltageReq* pVoltageReq = NULL;
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+
+
+    /* validate the Rm Handle */
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pListSize);
+    NV_ASSERT(((*pListSize) == 0) || (pIdList && pActiveList));
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Count power aware modules, fill in the list
+    for (i = ModulesNum = ActiveNum = 0; i < pRegistry->UsedIndexRange; i++)
+    {
+        pPowerClient = pRegistry->pPowerClients[i];
+        if (pPowerClient)
+        {
+            pVoltageReq = pPowerClient->pVoltageReqHead;
+            while (pVoltageReq != NULL)
+            {
+                ModulesNum++;
+                active = (pVoltageReq->MaxVolts != NvRmVoltsOff);
+                ActiveNum += active ? 1 : 0;
+                if (*pListSize >= ModulesNum)
+                {
+                    *(pIdList++) = pVoltageReq->ModuleId;
+                    *(pActiveList++) = active;
+                }
+                pVoltageReq = pVoltageReq->pNext;
+            }
+        }
+    }
+    // Report number of found modules
+    if ((*pListSize == 0) || (*pListSize > ModulesNum))
+    {
+        *pListSize = ModulesNum;
+    }
+    // Total refcounts must be = number of active modules
+    // (the <= bound deliberately includes the slot past NV_POWERGROUP_MAX,
+    //  presumably NVRM_POWERGROUP_NPG_AUTO -- TODO confirm array sizing)
+    for (i = 0; i <= NV_POWERGROUP_MAX; i++)
+        ActiveNum -= s_PowerOnRefCounts[i];
+    NV_ASSERT(ActiveNum == 0);
+
+    NvOsMutexUnlock(s_hPowerClientMutex);
+}
+
+/*****************************************************************************/
+
+/*
+ * Records an array of DFS starvation hints for one client and maintains
+ * per-domain starvation reference counts; hints for CPU, AVP, or VDE pipe
+ * are mirrored onto EMC. Lazily allocates the client's per-domain hint
+ * array on first use. Caller holds s_hPowerClientMutex.
+ * Returns NvError_InsufficientMemory if the hint array allocation fails
+ * (earlier hints in the batch stay applied).
+ */
+static NvError
+RecordStarvationHints(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmPowerClient* pPowerClient,
+    const NvRmDfsStarvationHint* pMultiHint,
+    NvU32 NumHints)
+{
+    NvU32 i;
+    NvBool HintChanged = NV_FALSE;
+
+    for (i = 0; i < NumHints; i++)
+    {
+        NvRmDfsClockId ClockId = pMultiHint[i].ClockId;
+        NvBool Starving = pMultiHint[i].Starving;
+        NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+
+        /*
+         * If this is the first starvation hint, allocate hints array and fill
+         * it in. Otherwise, just update starvation hint status. In both cases
+         * determine if starvation hint for clock domain has changed.
+         */
+        if (pPowerClient->pStarvationHints == NULL)
+        {
+            size_t s = sizeof(NvBool) * (size_t)NvRmDfsClockId_Num;
+            NvBool* p = NvOsAlloc(s);
+            if (p == NULL)
+            {
+                return NvError_InsufficientMemory;
+            }
+            NvOsMemset(p, 0, s);
+            pPowerClient->pStarvationHints = p;
+
+            // Only new Starvation On hint counts as change
+            HintChanged = Starving;
+        }
+        else
+        {
+            // Only changes from On to Off or vice versa counts
+            HintChanged = (pPowerClient->pStarvationHints[ClockId] != Starving);
+        }
+        pPowerClient->pStarvationHints[ClockId] = Starving;
+
+        // If hint has changed, update clock domain starvation reference count
+        // (hint against CPU, or AVP, or VDE is automatically applied to EMC)
+        if (HintChanged)
+        {
+            if (Starving)
+            {
+                if ((ClockId == NvRmDfsClockId_Cpu) ||
+                    (ClockId == NvRmDfsClockId_Avp) ||
+                    (ClockId == NvRmDfsClockId_Vpipe))
+                {
+                    s_StarveOnRefCounts[NvRmDfsClockId_Emc]++;
+                }
+                s_StarveOnRefCounts[ClockId]++;
+            }
+            else
+            {
+                if ((ClockId == NvRmDfsClockId_Cpu) ||
+                    (ClockId == NvRmDfsClockId_Avp) ||
+                    (ClockId == NvRmDfsClockId_Vpipe))
+                {
+                    NV_ASSERT(s_StarveOnRefCounts[NvRmDfsClockId_Emc] != 0);
+                    s_StarveOnRefCounts[NvRmDfsClockId_Emc]--;
+                }
+                NV_ASSERT(s_StarveOnRefCounts[ClockId] != 0);
+                s_StarveOnRefCounts[ClockId]--;
+            }
+        }
+    }
+    return NvSuccess;
+}
+
+/*
+ * Reports whether any client currently declares the given DFS clock
+ * domain starving. Lock-free: a single counter read is atomic enough
+ * for this boolean query.
+ */
+NvBool NvRmPrivDfsIsStarving(NvRmDfsClockId ClockId)
+{
+    NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+    if (s_StarveOnRefCounts[ClockId] != 0)
+        return NV_TRUE;
+    return NV_FALSE;
+}
+
+/*
+ * Applies a batch of DFS starvation hints on behalf of a registered
+ * client. No-op (success) on non-SoC platforms or when DFS is disabled,
+ * since clocks are already at maximum there.
+ */
+NvError
+NvRmPowerStarvationHintMulti(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 ClientId,
+    const NvRmDfsStarvationHint* pMultiHint,
+    NvU32 NumHints)
+{
+    NvError error;
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pMultiHint && NumHints);
+
+    /* Do nothing on platforms where there is no freq scaling like QT and FPGA */
+    if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
+    {
+        return NvSuccess;
+    }
+    // Do nothing if DFS is disabled, and therefore all clocks are maxed anyway
+    if (NvRmDfsGetState(hRmDeviceHandle) <= NvRmDfsRunState_Disabled)
+    {
+        return NvSuccess;
+    }
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Check if this client ID was registered; return error otherwise
+    if (ClientIndex < pRegistry->UsedIndexRange)
+    {
+        pPowerClient = pRegistry->pPowerClients[ClientIndex];
+    }
+    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
+    {
+        NvOsMutexUnlock(s_hPowerClientMutex);
+        return NvError_BadValue;
+    }
+    // Add new starvation hint
+    error = RecordStarvationHints(
+        hRmDeviceHandle, pPowerClient, pMultiHint, NumHints);
+
+    NvOsMutexUnlock(s_hPowerClientMutex);
+
+    // NOTE(review): pPowerClient->tag is read after the mutex is released;
+    // safe only if this client cannot be unregistered concurrently -- verify
+    if (error == NvSuccess)
+        NvRmPrivStarvationHintPrintf(
+            ClientIndex, pPowerClient->tag, pMultiHint, NumHints);
+    return error;
+}
+
+/*
+ * Single-hint convenience wrapper: packs one starvation hint record and
+ * forwards it to NvRmPowerStarvationHintMulti.
+ */
+NvError
+NvRmPowerStarvationHint (
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmDfsClockId ClockId,
+    NvU32 ClientId,
+    NvBool Starving)
+{
+    NvRmDfsStarvationHint hint;
+
+    hint.ClockId = ClockId;
+    hint.Starving = Starving;
+    return NvRmPowerStarvationHintMulti(
+        hRmDeviceHandle, ClientId, &hint, 1);
+}
+
+/*****************************************************************************/
+
+/*
+ * Removes all busy hints queued by the given client on one clock domain.
+ * The static head entry is never freed; if it belongs to this client its
+ * interval is zeroed so the boost survives exactly one more DFS sample.
+ * Caller holds s_hPowerClientMutex.
+ */
+static void CancelBusyHints(NvRmDfsClockId ClockId, NvU32 ClientId)
+{
+    BusyHintReq* pBusyHintReq = NULL;
+    BusyHintReq* pBusyHintNext = NULL;
+
+    /*
+     * Traverse busy hints list, starting from the head and looking for hints
+     * reported by the specified client. Remove found hint nodes on the way.
+     */
+    pBusyHintReq = &s_BusyReqHeads[ClockId];
+    if (pBusyHintReq->ClientId == ClientId)
+    {
+        pBusyHintReq->IntervalMs = 0;   // Keep head for just one more sample
+    }
+    while (pBusyHintReq != NULL)
+    {
+        pBusyHintNext = pBusyHintReq->pNext;
+        if ((pBusyHintNext != NULL) && (pBusyHintNext->ClientId == ClientId))
+        {
+            // Unlink and free the matching node; stay on the current node
+            // so consecutive matches are all removed
+            pBusyHintReq->pNext = pBusyHintNext->pNext;
+            NvOsFree(pBusyHintNext);
+            continue;
+        }
+        pBusyHintReq = pBusyHintNext;
+    }
+}
+
+/*
+ * Throttled garbage collection of expired busy hints on one clock domain.
+ * Runs at most once per NVRM_DFS_BUSY_PURGE_MS; the unsigned subtraction
+ * handles millisecond-counter wraparound. Caller holds s_hPowerClientMutex.
+ * NOTE(review): s_LastPurgeMs is shared across all clock domains, so a
+ * purge on one domain postpones purges on the others -- presumably an
+ * acceptable "once in a while" policy; confirm if lists ever grow large.
+ */
+static void PurgeBusyHints(NvRmDfsClockId ClockId, NvU32 msec)
+{
+    static NvU32 s_LastPurgeMs = 0;
+    BusyHintReq* pBusyHintReq = NULL;
+    BusyHintReq* pBusyHintNext = NULL;
+
+    if ((msec - s_LastPurgeMs) <= NVRM_DFS_BUSY_PURGE_MS)
+        return;
+
+    /*
+     * If time to purge the busy hints list, traverse it starting from the
+     * head and looking for the expired hints. Remove found nodes on the way.
+     */
+    pBusyHintReq = &s_BusyReqHeads[ClockId];
+    while (pBusyHintReq != NULL)
+    {
+        pBusyHintNext = pBusyHintReq->pNext;
+        if ( (pBusyHintNext != NULL) &&
+             (pBusyHintNext->IntervalMs < (msec - pBusyHintNext->StartTimeMs)) )
+        {
+            pBusyHintReq->pNext = pBusyHintNext->pNext;
+            NvOsFree(pBusyHintNext);
+            continue;
+        }
+        pBusyHintReq = pBusyHintNext;
+    }
+    s_LastPurgeMs = msec;
+}
+
+/*
+ * Records a batch of DFS busy (frequency boost) hints for one client.
+ * Boost requests are clipped to the domain maximum; a zero boost cancels
+ * all of the client's hints on that domain. The head entry caches the
+ * current maximum boost; longer-lived hints are also inserted into the
+ * per-domain list in descending boost order. *pSignalDfs is set when the
+ * DFS thread should re-evaluate clocks. Caller holds s_hPowerClientMutex.
+ */
+static NvError
+RecordBusyHints(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 ClientId,
+    const NvRmDfsBusyHint* pMultiHint,
+    NvU32 NumHints,
+    NvBool* pSignalDfs)
+{
+    NvU32 i;
+    NvRmFreqKHz MaxKHz;
+    BusyHintReq* pInsert = NULL;
+    BusyHintReq* pBusyHintReq = NULL;
+    NvU32 msec = NvOsGetTimeMS();
+
+    *pSignalDfs = NV_FALSE;
+
+    for (i = 0; i < NumHints; i++)
+    {
+        NvRmDfsClockId ClockId = pMultiHint[i].ClockId;
+        NvRmFreqKHz BoostKHz = pMultiHint[i].BoostKHz;
+        NvU32 BoostDurationMs = pMultiHint[i].BoostDurationMs;
+        NvBool BusyPulseMode = pMultiHint[i].BusyAttribute;
+        NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+
+        // Clip requested boost frequency to domain maximum
+        MaxKHz = NvRmPrivDfsGetMaxKHz(ClockId);
+        if (BoostKHz > MaxKHz)
+        {
+            BoostKHz = MaxKHz;
+        }
+
+        // Cancel all hints sent by this client if it is no longer busy;
+        // signal DFS boost removed
+        if (BoostKHz == 0)
+        {
+            CancelBusyHints(ClockId, ClientId);
+            *pSignalDfs = NV_TRUE;
+            continue;
+        }
+
+        // Update maximum boost frequency stored in the head entry; signal DFS
+        // boost increase
+        if (s_BusyReqHeads[ClockId].BoostKHz < BoostKHz)
+        {
+            s_BusyReqHeads[ClockId].BoostKHz = BoostKHz;
+            s_BusyReqHeads[ClockId].IntervalMs = BoostDurationMs;
+            s_BusyReqHeads[ClockId].BusyPulseMode = BusyPulseMode;
+            s_BusyReqHeads[ClockId].StartTimeMs = msec;
+            s_BusyReqHeads[ClockId].ClientId = ClientId;
+            *pSignalDfs = NV_TRUE;
+        }
+
+        /*
+         * If it is a short spike no need to store the record, as maximum boost
+         * has been already updated. Otherwise, insert new busy record into the
+         * busy hints list in descending order of requested boost frequencies
+         */
+        if (BoostDurationMs > NVRM_DFS_BUSY_MIN_MS)
+        {
+            for (pInsert = &s_BusyReqHeads[ClockId] ;;)
+            {
+                if ((pInsert->pNext == NULL) ||
+                    (pInsert->pNext->BoostKHz < BoostKHz))
+                {
+                    // Allocate and initialize new boost hint record
+                    pBusyHintReq = NvOsAlloc(sizeof(BusyHintReq));
+                    if (pBusyHintReq == NULL)
+                    {
+                        return NvError_InsufficientMemory;
+                    }
+                    pBusyHintReq->BoostKHz = BoostKHz;
+                    pBusyHintReq->IntervalMs = BoostDurationMs;
+                    pBusyHintReq->BusyPulseMode = BusyPulseMode;
+                    pBusyHintReq->StartTimeMs = msec;
+                    pBusyHintReq->ClientId = ClientId;
+                    pBusyHintReq->pNext = pInsert->pNext;
+                    pInsert->pNext = pBusyHintReq;
+                    break;
+                }
+                else if (pInsert->pNext->BoostKHz == BoostKHz)
+                {
+                    // Combine hints from the same client with the same
+                    // boost level and pulse mode: extend the existing
+                    // node's window instead of inserting a duplicate
+                    if ((pInsert->pNext->ClientId == ClientId) &&
+                        (pInsert->pNext->BusyPulseMode == BusyPulseMode))
+                    {
+                        NvU32 t = msec - pInsert->pNext->StartTimeMs;
+                        if ((BoostDurationMs > pInsert->pNext->IntervalMs) ||
+                            (t > (pInsert->pNext->IntervalMs - BoostDurationMs)))
+                        {
+                            pInsert->pNext->StartTimeMs = msec;
+                            pInsert->pNext->IntervalMs = BoostDurationMs;
+                        }
+                        break;
+                    }
+                }
+                pInsert = pInsert->pNext;
+            }
+            PurgeBusyHints(ClockId, msec); // Purge the list once in a while
+        }
+    }
+    return NvSuccess;
+}
+
+/*
+ * Returns the current maximum busy boost for a DFS clock domain together
+ * with its pulse-mode flag and remaining lifetime in ms (0 = expired,
+ * NV_WAIT_INFINITE = until canceled). Expired list nodes are freed and the
+ * head cache is refreshed from the strongest surviving hint.
+ */
+void NvRmPrivDfsGetBusyHint(
+    NvRmDfsClockId ClockId,
+    NvRmFreqKHz* pBusyKHz,
+    NvBool* pBusyPulseMode,
+    NvU32* pBusyExpireMs)
+{
+    NvU32 msec;
+    BusyHintReq* pBusyHintReq;
+
+    NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+
+    // Boolean read - no need for lock - fast path for most common case
+    // when no busy hints are recorded
+    if (s_BusyReqHeads[ClockId].BoostKHz == 0)
+    {
+        *pBusyKHz = 0;
+        *pBusyPulseMode = NV_FALSE;
+        *pBusyExpireMs = 0;
+        return;
+    }
+    msec = NvOsGetTimeMS();
+
+    NvOsMutexLock(s_hPowerClientMutex);
+    /*
+     * Get boost frequency from the head. Then, traverse busy hints list,
+     * starting from the head looking for max non-expired frequency boost.
+     * Remove expired nodes on the way. Update head boost frequency.
+     */
+    pBusyHintReq = &s_BusyReqHeads[ClockId];
+    *pBusyKHz = pBusyHintReq->BoostKHz;
+    *pBusyPulseMode = pBusyHintReq->BusyPulseMode;
+    *pBusyExpireMs = 0;        // assume head hint has already expired
+    if (pBusyHintReq->IntervalMs == NV_WAIT_INFINITE)
+        *pBusyExpireMs = NV_WAIT_INFINITE; // head hint until canceled
+    else if (pBusyHintReq->IntervalMs >= (msec - pBusyHintReq->StartTimeMs))
+        *pBusyExpireMs =       // non-expired head hint
+        pBusyHintReq->IntervalMs - (msec - pBusyHintReq->StartTimeMs);
+
+    pBusyHintReq = pBusyHintReq->pNext;
+    while (pBusyHintReq != NULL)
+    {
+        BusyHintReq* p;
+        if (pBusyHintReq->IntervalMs >= (msec - pBusyHintReq->StartTimeMs))
+        {
+            break;
+        }
+        p = pBusyHintReq;
+        pBusyHintReq = pBusyHintReq->pNext;
+        NvOsFree(p);
+    }
+    // Copy the strongest surviving node into the static head cache; the
+    // node itself stays linked (head and first node intentionally alias
+    // until the node is purged or superseded)
+    if (pBusyHintReq)
+    {
+        s_BusyReqHeads[ClockId] = *pBusyHintReq;
+        s_BusyReqHeads[ClockId].pNext = pBusyHintReq;
+    }
+    else
+        NvOsMemset(&s_BusyReqHeads[ClockId], 0, sizeof(s_BusyReqHeads[ClockId]));
+    NvOsMutexUnlock(s_hPowerClientMutex);
+}
+
+/*
+ * Applies a batch of DFS busy hints on behalf of a registered client and,
+ * when a boost was added or removed, signals the DFS clock control thread.
+ * No-op (success) on non-SoC platforms or when DFS is disabled.
+ */
+NvError
+NvRmPowerBusyHintMulti(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 ClientId,
+    const NvRmDfsBusyHint* pMultiHint,
+    NvU32 NumHints,
+    NvRmDfsBusyHintSyncMode Mode)
+{
+    NvError error;
+    NvRmDfsRunState DfsState;
+    NvBool SignalDfs = NV_FALSE;
+    NvRmPowerClient* pPowerClient = NULL;
+    NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+    NvU32 ClientIndex = NVRM_POWER_ID2INDEX(ClientId);
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pMultiHint && NumHints);
+    DfsState = NvRmDfsGetState(hRmDeviceHandle);
+
+    /* Do nothing on platforms where there is no freq scaling like QT and FPGA */
+    if (NvRmPrivGetExecPlatform(hRmDeviceHandle) != ExecPlatform_Soc)
+    {
+        return NvSuccess;
+    }
+    // Do nothing if DFS is disabled, and therefore all clocks are maxed anyway
+    if (DfsState <= NvRmDfsRunState_Disabled)
+    {
+        return NvSuccess; // deliberately success, not an error, when disabled
+    }
+
+    NvOsMutexLock(s_hPowerClientMutex);
+
+    // Check if this client ID was registered; return error otherwise
+    if (ClientIndex < pRegistry->UsedIndexRange)
+    {
+        pPowerClient = pRegistry->pPowerClients[ClientIndex];
+    }
+    if ((pPowerClient == NULL) || (pPowerClient->id != ClientId))
+    {
+        NvOsMutexUnlock(s_hPowerClientMutex);
+        return NvError_BadValue;
+    }
+    // Add new busy hint record to the list
+    error = RecordBusyHints(
+        hRmDeviceHandle, ClientId, pMultiHint, NumHints, &SignalDfs);
+
+    NvOsMutexUnlock(s_hPowerClientMutex);
+
+    // NOTE(review): pPowerClient->tag is read after the mutex is released;
+    // safe only if this client cannot be unregistered concurrently -- verify
+    if (error == NvSuccess)
+    {
+        NvRmPrivBusyHintPrintf(
+            ClientIndex, pPowerClient->tag, pMultiHint, NumHints);
+        if (SignalDfs && (DfsState > NvRmDfsRunState_Stopped))
+        {
+            // Signal DFS clock control provided DFS is running
+            NvRmPrivDfsSignal(Mode);
+        }
+    }
+    return error;
+}
+
+/*
+ * Single-hint convenience wrapper: packs one busy hint record (pulse mode
+ * off) and forwards it asynchronously to NvRmPowerBusyHintMulti.
+ */
+NvError
+NvRmPowerBusyHint (
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmDfsClockId ClockId,
+    NvU32 ClientId,
+    NvU32 BoostDurationMs,
+    NvRmFreqKHz BoostKHz)
+{
+    NvRmDfsBusyHint hint;
+
+    hint.ClockId = ClockId;
+    hint.BoostKHz = BoostKHz;
+    hint.BoostDurationMs = BoostDurationMs;
+    hint.BusyAttribute = NV_FALSE;
+    return NvRmPowerBusyHintMulti(hRmDeviceHandle, ClientId, &hint, 1,
+                                  NvRmDfsBusyHintSyncMode_Async);
+}
+
+/*****************************************************************************/
+
+/*
+ * Activity hints are not supported by this RM build; the call is a stub
+ * kept for API completeness.
+ */
+NvError
+NvRmPowerActivityHint (
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmModuleID ModuleId,
+    NvU32 ClientId,
+    NvU32 ActivityDurationMs)
+{
+    /* validate the Rm Handle */
+    NV_ASSERT( hRmDeviceHandle );
+
+    (void)ModuleId;
+    (void)ClientId;
+    (void)ActivityDurationMs;
+    return NvError_NotImplemented;
+}
+
+/*
+ * Kernel suspend hook: quiesces the GART, masks PMU interrupts, and puts
+ * DFS into the lowest SoC power state selected by the ODM query. With
+ * NVRM_POWER_DEBUG_SUSPEND_ENTRY enabled it additionally logs any modules
+ * still holding power-on requests. Always returns NvSuccess.
+ */
+NvError
+NvRmKernelPowerSuspend( NvRmDeviceHandle hRmDeviceHandle )
+{
+
+    NvRmPrivGartSuspend(hRmDeviceHandle);
+    NvRmPrivPmuInterruptMask(hRmDeviceHandle, NV_TRUE);
+    NvRmPrivDfsSuspend(NvOdmQueryLowestSocPowerState()->LowestPowerState);
+
+#if NVRM_POWER_DEBUG_SUSPEND_ENTRY
+    NvOsMutexLock(s_hPowerClientMutex);
+    {
+        NvU32 i;
+        ModuleVoltageReq* pVoltageReq = NULL;
+        NvRmPowerClient* pPowerClient = NULL;
+        NvRmPowerRegistry* pRegistry = &s_PowerRegistry;
+        NvRmPowerState s = NvRmPrivPowerGetState(hRmDeviceHandle);
+
+        // Report combined RM power state and active modules
+        NvOsDebugPrintf("RM power state before suspend: %s (%d)\n",
+            ((s == NvRmPowerState_Active) ? "Active" :
+             ((s == NvRmPowerState_AutoHw) ? "AutoHw" : "Idle")), s);
+        if (s == NvRmPowerState_Active)
+        {
+            for (i = 0; i < pRegistry->UsedIndexRange; i++)
+            {
+                pPowerClient = pRegistry->pPowerClients[i];
+                if (pPowerClient)
+                {
+                    pVoltageReq = pPowerClient->pVoltageReqHead;
+                    while (pVoltageReq != NULL)
+                    {
+                        if (pVoltageReq->MaxVolts != NvRmVoltsOff)
+                        {
+                            // could also set some bad e = NvError_Bad???
+                            NvOsDebugPrintf("Active Module: 0x%x",
+                                            pVoltageReq->ModuleId);
+                        }
+                        pVoltageReq = pVoltageReq->pNext;
+                    }
+                }
+            }
+        }
+    }
+    NvOsMutexUnlock(s_hPowerClientMutex);
+#endif
+
+    return NvSuccess;
+}
+
+/*
+ * Kernel resume hook: unmasks PMU interrupts, then restores the GART.
+ * Always returns NvSuccess.
+ */
+NvError
+NvRmKernelPowerResume( NvRmDeviceHandle hRmDeviceHandle )
+{
+    NvError status = NvSuccess;
+
+    NvRmPrivPmuInterruptMask(hRmDeviceHandle, NV_FALSE);
+    NvRmPrivGartResume(hRmDeviceHandle);
+    return status;
+}
+
+
+
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
new file mode 100644
index 000000000000..a154209139bd
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
@@ -0,0 +1,3523 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: Implements NvRM Dynamic Voltage and Frequency Scaling
+ * for SOC-wide clock domains.
+ *
+ */
+
+#include "nvrm_power_dfs.h"
+#include "nvrm_pmu.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_query_discovery.h"
+#include "ap15/ap15rm_private.h"
+#include "ap15/ap15rm_power_dfs.h"
+#include "ap15/ap15rm_clocks.h"
+#include "ap20/ap20rm_power_dfs.h"
+#include "ap20/ap20rm_clocks.h"
+
+/*****************************************************************************/
+
+// Initial DFS configuration
+#define AP15_FPGA_FREQ (8330)
+#define AP20_FPGA_FREQ (13000)
+#define NVRM_FPGA_INITIAL_DFS_STATE (NvRmDfsRunState_Disabled)
+#define NVRM_AP15_SOC_INITIAL_DFS_STATE (NvRmDfsRunState_Stopped)
+#define NVRM_AP20_SOC_INITIAL_DFS_STATE (NvRmDfsRunState_Stopped)
+#define NVRM_EMC_DFS_DEFAULT_DISABLED (0)
+
+// Low boundaries for DFS clock frequencies imposed by download transports.
+// For Ethernet this limitation is related to MIO WAR, and it is applied only
+// to AP15 A01.
+#define NVRM_USB_AHB_MIN_KHZ (100000)
+#define NVRM_ETHERNET_AHB_MIN_KHZ (30000)
+#define NVRM_ETHERNET_EMC_MIN_KHZ (24000)
+#define NVRM_SPI_CPU_MIN_KHZ (40000)
+#define NVRM_SPI_APB_MIN_KHZ (30000)
+
+// An option to stall average accumulation during busy pulse
+#define NVRM_DFS_STALL_AVERAGE_IN_BUSY_PULSE (0)
+
+// TODO: keep disabled until AP20 bring-up
+// Options for temperature monitoring interrupt
+#define NVRM_DTT_DISABLED (1)
+#define NVRM_DTT_USE_INTERRUPT (0)
+
+/*****************************************************************************/
+
+// TODO: Always Disable before check-in
+// Module debug: 0=disable, 1=enable
+#define NVRM_DFS_ENABLE_PRINTF (0)
+#if NVRM_DFS_ENABLE_PRINTF
+#define NVRM_DFS_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_DFS_PRINTF(x)
+#endif
+
+// TODO: Always Disable before check-in
+// DFS profiling: 0=disable, 1=enable (prints from clock control thread)
+#define DFS_PROFILING (0)
+
+// TODO: Always Disable before check-in
+// DFS clients busy and starvation hints report: 0=disable, 1=enable
+// (prints from client API thread)
+#define DFS_HINTS_PRINTF (0)
+
+// TODO: Always Disable before check-in
+// DFS detailed logging: 0=disable, non zero = enable (saves dfs log in memory
+// for at least specified number of seconds)
+#define DFS_LOGGING_SECONDS (0)
+
+// TODO: Always Disable before check-in
+// DFS sync busy int timeout: 0=disable, non zero = enable (set sync busy hint
+// timeout for specified number of milliseconds)
+#define DFS_SYNC_BUSY_TIMEOUT_MS (0)
+
+/*****************************************************************************/
+
+// Microsecond timer read macro
+#define NvRmPrivGetUs() NV_READ32(s_pTimerUs)
+
+/*****************************************************************************/
+
+#if DFS_PROFILING
+
+typedef struct DfsProfileRec
+{
+ NvU32 SamplesNo[NvRmDfsProfileId_Num];
+ NvU32 StartUs[NvRmDfsProfileId_Num];
+ NvU32 AccumulatedUs[NvRmDfsProfileId_Num];
+} DfsProfile;
+
+
+#define DfsProfileInit(pDfs) \
+do\
+{\
+ NvU32 i; \
+ NvOsMemset(&s_Profile, 0, sizeof(DfsProfile)); \
+ for (i = 1; i < NvRmDfsProfileId_Num; i++) \
+ {\
+ s_Profile.StartUs[i] = NvRmPrivGetUs(); \
+ }\
+} while(0)
+
+#define DfsProfileStart(pDfs, ProfileId) \
+do\
+{\
+ if ((pDfs)->DfsRunState == NvRmDfsRunState_ProfiledLoop) \
+ {\
+ s_Profile.StartUs[(ProfileId)] = NvRmPrivGetUs(); \
+ }\
+} while(0)
+
+#define DfsProfileSample(pDfs, ProfileId) \
+do\
+{\
+ if ((pDfs)->DfsRunState == NvRmDfsRunState_ProfiledLoop) \
+ {\
+ s_Profile.SamplesNo[(ProfileId)]++; \
+ s_Profile.AccumulatedUs[(ProfileId)] += \
+ (NvRmPrivGetUs() - s_Profile.StartUs[(ProfileId)]); \
+ }\
+} while(0)
+
+static DfsProfile s_Profile = {{0}};
+
+#else
+
+#define DfsProfileInit(pDfs)
+#define DfsProfileStart(pDfs, ProfileId)
+#define DfsProfileSample(pDfs, ProfileId)
+#endif
+
+/*****************************************************************************/
+
+#if DFS_HINTS_PRINTF
+
+// Map DFS clock-domain IDs to short names used by hint prints.
+// Note: no trailing ';' after while (0) — the do/while(0) idiom relies on
+// the caller's own ';' to complete the statement; a semicolon here would
+// expand invocations into two statements and break if/else usage, and was
+// inconsistent with the empty DfsHintsPrintInit() stub in the #else branch.
+#define DfsHintsPrintInit() \
+do\
+{\
+    s_DfsDomainNames[NvRmDfsClockId_Cpu] = "Cpu"; \
+    s_DfsDomainNames[NvRmDfsClockId_Avp] = "Avp"; \
+    s_DfsDomainNames[NvRmDfsClockId_System] = "Sys"; \
+    s_DfsDomainNames[NvRmDfsClockId_Ahb] = "Ahb"; \
+    s_DfsDomainNames[NvRmDfsClockId_Apb] = "Apb"; \
+    s_DfsDomainNames[NvRmDfsClockId_Vpipe] = "Vde"; \
+    s_DfsDomainNames[NvRmDfsClockId_Emc] = "Emc"; \
+} while (0)
+
+static char* s_DfsDomainNames[NvRmDfsClockId_Num];
+
+// Decode a 32-bit DFS client tag into a NUL-terminated 4-character string,
+// unpacking bytes in reverse order (least significant byte becomes the
+// last character). ClientName must hold at least 5 bytes.
+static void ClientTagToString(NvU32 ClientTag, char ClientName[])
+{
+    NvU32 idx = sizeof(ClientTag);
+
+    ClientName[idx] = 0x00;
+    while (idx-- != 0)
+    {
+        NvU8 ch = (NvU8)(ClientTag & 0xFF);
+        ClientTag >>= 8;
+        // Substitute '*' for non-ASCII codes in non-initialized tags
+        if ((ch < ' ') || (ch > 0x7F))
+            ch = '*';
+        ClientName[idx] = ch;
+    }
+}
+
+#else
+#define DfsHintsPrintInit()
+#endif
+
+/*****************************************************************************/
+
+#if DFS_LOGGING_SECONDS
+
+// Log size, assuming ~100 samples / sec
+#define DFS_LOG_SIZE (100 * DFS_LOGGING_SECONDS)
+
+typedef struct DfsLogEntryRec
+{
+ NvU32 SampleIntervalMs;
+ NvU32 Lp2TimeMs;
+ NvU32 ActiveCycles[NvRmDfsClockId_Num];
+ NvRmDfsFrequencies CurrentKHz;
+ NvRmDfsFrequencies AverageKHz;
+} DfsLogEntry;
+
+typedef struct DfsLogStarvationHintRec
+{
+ NvU32 LogSampleIndex;
+ NvU32 ClientId;
+ NvU32 ClientTag;
+ NvRmDfsStarvationHint StarvationHint;
+} DfsLogStarvationHint;
+
+typedef struct DfsLogBusyHintRec
+{
+ NvU32 LogSampleIndex;
+ NvU32 ClientId;
+ NvU32 ClientTag;
+ NvRmDfsBusyHint BusyHint;
+} DfsLogBusyHint;
+
+static NvU32 s_DfsLogWrIndex = 0;
+static DfsLogEntry s_DfsLog[DFS_LOG_SIZE];
+
+static NvU32 s_DfsLogStarvationWrIndex = 0;
+static DfsLogStarvationHint s_DfsLogStarvation[DFS_LOG_SIZE];
+
+static NvU32 s_DfsLogBusyWrIndex = 0;
+static DfsLogBusyHint s_DfsLogBusy[DFS_LOG_SIZE];
+
+#define DfsLogEnter(pDfs, Lp2Ms) \
+do\
+{\
+ if (s_DfsLogOn && (s_DfsLogWrIndex < DFS_LOG_SIZE)) \
+ {\
+ NvU32 i; \
+ DfsLogEntry* pEntry = &s_DfsLog[s_DfsLogWrIndex++]; \
+ pEntry->SampleIntervalMs = *(pDfs)->SamplingWindow.pLastInterval; \
+ pEntry->Lp2TimeMs = (Lp2Ms); \
+ for (i = 1; i < NvRmDfsClockId_Num; i++) \
+ { \
+ pEntry->ActiveCycles[i] = *(pDfs)->Samplers[i].pLastSample; \
+ pEntry->AverageKHz.Domains[i] = (pDfs)->Samplers[i].AverageKHz; \
+ } \
+ pEntry->CurrentKHz = (pDfs)->CurrentKHz; \
+ }\
+} while(0)
+
+#else
+#define DfsLogEnter(pDfs, Lp2TimeMs)
+#endif
+
+/*****************************************************************************/
+
+// DFS object
+static NvRmDfs s_Dfs;
+
+// Execution Platform
+static ExecPlatform s_Platform;
+
+// Microsecond timer virtual address
+static void* s_pTimerUs;
+
+// NV DFS logging enabled indicator
+static NvBool s_DfsLogOn = NV_FALSE;
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/*
+ * Gets monitoring capabilities of the DFS module
+ */
+static NvError SystatMonitorsGetCapabilities(NvRmDfs* pDfs);
+static NvError VdeMonitorsGetCapabilities(NvRmDfs* pDfs);
+static NvError EmcMonitorsGetCapabilities(NvRmDfs* pDfs);
+
+/*
+ * Gets monitoring capabilities of all DFS modules
+ */
+static NvError DfsGetModulesCapabilities(NvRmDfs* pDfs);
+
+/*
+ * Initializes all DFS HW monitors
+ */
+static NvError DfsHwInit(NvRmDfs* pDfs);
+
+/*
+ * Deinitializes all DFS HW monitors
+ */
+static void DfsHwDeinit(NvRmDfs* pDfs);
+
+/*
+ * Starts activity monitors in all DFS modules for the next sample interval
+ * and enables DFS interrupt
+ */
+static void
+DfsStartMonitors(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ const NvU32 IntervalMs);
+
+/*
+ * Reads idle count from activity monitors in all DFS modules. The monitors are
+ * stopped.
+ */
+static void
+DfsReadMonitors(
+ const NvRmDfs* pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+
+/*****************************************************************************/
+
+/*
+ * Initializes DFS algorithm parameters
+ */
+static void DfsParametersInit(NvRmDfs* pDfs);
+
+/*
+ * Initializes DFS samplers for specified frequencies
+ */
+static void DfsSamplersInit(
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfs* pDfs);
+
+/*****************************************************************************/
+
+/*
+ * DFS ISR (executes DFS algorithm)
+ */
+static void DfsIsr(void* args);
+
+/*
+ * Determines target frequencies for DFS domains
+ */
+static NvBool
+DfsGetTargetFrequencies(
+ const NvRmDfsIdleData* pIdleData,
+ NvRmDfs* pDfs,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/*
+ * Adds new sample interval to the sample window
+ */
+static NvBool
+AddSampleInterval(
+ NvRmDfsSampleWindow* pSampleWindow,
+ NvU32 IntervalMs);
+
+/*
+ * Adds new activity sample to the domain buffer
+ */
+static void
+AddActivitySample(
+ NvRmDfsSampler* pDomainSampler,
+ NvU32 ActiveCount);
+
+// Determine PM thread request for CPU state control
+static NvRmPmRequest
+DfsGetPmRequest(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDfsSampler* pCpuSampler,
+ NvRmFreqKHz CpuKHz);
+
+/*****************************************************************************/
+
+/*
+ * DFS clock control thread entry point and termination function
+ */
+static NvRmPmRequest DfsThread(NvRmDfs* pDfs);
+static void DfsThreadTerminate(NvRmDfs* pDfs);
+
+/*
+ * Returns current frequencies of DFS clocks
+ */
+static void
+DfsClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/*
+ * Configures DFS clocks according to target frequencies,
+ * and returns actual frequencies
+ */
+static NvBool
+DfsClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsFrequencies* pMaxKHz,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/*
+ * Clips EMC frequency high limit to one of the fixed DFS EMC configurations,
+ * and if necessary adjust CPU high limit respectively.
+ */
+static void
+DfsClipCpuEmcHighLimits(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pCpuHighKHz,
+ NvRmFreqKHz* pEmcHighKHz);
+
+/*
+ * Emulate sampling results to achieve specified average frequency
+ * provided it is bigger than the current one
+ */
+static void
+DfsSetAverageUp(
+ NvRmDfsClockId ClockId,
+ NvRmFreqKHz AverageKHz,
+ NvRmDfs* pDfs);
+
+/*
+ * Changes core and rtc voltages, keeping them in synch
+ */
+static void
+DvsChangeCoreVoltage(
+ NvRmDeviceHandle hRm,
+ NvRmDvs* pDvs,
+ NvRmMilliVolts TargetMv);
+
+/*
+ * Changes dedicated cpu rail voltage
+ */
+static void
+DvsChangeCpuVoltage(
+ NvRmDeviceHandle hRm,
+ NvRmDvs* pDvs,
+ NvRmMilliVolts TargetMv);
+
+/*
+ * Enable/Disable voltage scaling
+ */
+static void NvRmPrivDvsRun(void);
+static void NvRmPrivDvsStopAtNominal(void);
+
+/*
+ * Gets core temperature monitoring limits and polling time according
+ * to chip specific policy.
+ */
+static void
+DttGetTcorePolicy(
+ NvS32 TemperatureC,
+ const NvRmDfs* pDfs,
+ NvS32* pLowLimit,
+ NvS32* pHighLimit,
+ NvU32* pPollMs);
+
+/*
+ * Updates (throttles) target DFS frequencies based on SoC temperature.
+ */
+static NvBool
+DttClockUpdate(
+ const NvRmDfs* pDfs,
+ NvRmDtt* pDtt,
+ NvRmDfsFrequencies* pDfsKHz);
+
+/*
+ * DTT interrupt handler
+ */
+static void DttIntrCallback(void* args);
+
+/*****************************************************************************/
+// MONITORING CAPABILITIES
+/*****************************************************************************/
+
+/*
+ * Queries capabilities of the System Statistic module and records them in
+ * pDfs->Modules[NvRmDfsModuleId_Systat]. This module hosts the CPU, AVP,
+ * AHB, and APB activity monitors and is mandatory for DFS operation.
+ */
+static NvError SystatMonitorsGetCapabilities(NvRmDfs* pDfs)
+{
+    NvError err;
+    NvRmModuleID id;
+    NvRmDfsModule* pActualCaps;
+    NvRmDfsModule SystatCaps[1] = {{{0}}};
+    NvRmModuleCapability ModuleCaps[1];
+    NvRmModuleTable* pTable;
+    NvRmDeviceHandle hRm = pDfs->hRm;
+
+    pTable = NvRmPrivGetModuleTable(hRm);
+
+    // Single capability set: monitors for CPU, AVP, AHB, and APB domains,
+    // serviced by the AP15 systat monitor routines.
+    SystatCaps[0].DomainMap[NvRmDfsClockId_Cpu] = NV_TRUE;
+    SystatCaps[0].DomainMap[NvRmDfsClockId_Avp] = NV_TRUE;
+    SystatCaps[0].DomainMap[NvRmDfsClockId_Ahb] = NV_TRUE;
+    SystatCaps[0].DomainMap[NvRmDfsClockId_Apb] = NV_TRUE;
+    SystatCaps[0].Init = NvRmPrivAp15SystatMonitorsInit;
+    SystatCaps[0].Deinit = NvRmPrivAp15SystatMonitorsDeinit;
+    SystatCaps[0].Start = NvRmPrivAp15SystatMonitorsStart;
+    SystatCaps[0].Read = NvRmPrivAp15SystatMonitorsRead;
+
+    ModuleCaps[0].MajorVersion = 1;
+    ModuleCaps[0].MinorVersion = 0;
+    ModuleCaps[0].EcoLevel = 0;
+    ModuleCaps[0].Capability = (void*)SystatCaps;
+
+    id = NVRM_MODULE_ID(NvRmModuleID_SysStatMonitor, 0);
+    err = NvRmModuleGetCapabilities(hRm, id, ModuleCaps, 1, (void**)&pActualCaps);
+    if (err != NvSuccess)
+    {
+        // Module is not present - DFS can not start
+        return err;
+    }
+    pActualCaps->pBaseReg = (pTable->ModInst +
+        pTable->Modules[NvRmModuleID_SysStatMonitor].Index)->VirtAddr;
+
+    // AP15/AP16 h/w bug 429585 - time spent by CPU in LP2 is not counted
+    // as idle - need to explicitly offset monitor readings
+    if ((pDfs->hRm->ChipId.Id == 0x15) || (pDfs->hRm->ChipId.Id == 0x16))
+    {
+        pActualCaps->Offset = NVRM_CPU_IDLE_LP2_OFFSET;
+    }
+    pDfs->Modules[NvRmDfsModuleId_Systat] = *pActualCaps;
+    return NvSuccess;
+}
+
+/*
+ * Queries capabilities of the VDE module and records them in
+ * pDfs->Modules[NvRmDfsModuleId_Vde]. The video-pipe activity monitor is
+ * optional; absence is not an error - a "not present" entry is recorded.
+ */
+static NvError VdeMonitorsGetCapabilities(NvRmDfs* pDfs)
+{
+    NvError err;
+    NvRmModuleID id;
+    NvRmDfsModule* pActualCaps;
+    NvRmDfsModule VdeCaps[2] = {{{0}}};
+    NvRmModuleCapability ModuleCaps[3];
+    NvRmModuleTable* pTable;
+    NvRmDeviceHandle hRm = pDfs->hRm;
+
+    pTable = NvRmPrivGetModuleTable(hRm);
+
+    // VdeCaps[0] = monitor not present; VdeCaps[1] = monitor present,
+    // serviced by the AP15 VDE monitor routines (shared by all versions).
+    VdeCaps[0].DomainMap[NvRmDfsClockId_Vpipe] = NV_FALSE;
+
+    VdeCaps[1].DomainMap[NvRmDfsClockId_Vpipe] = NV_TRUE;
+    VdeCaps[1].Init = NvRmPrivAp15VdeMonitorsInit;
+    VdeCaps[1].Deinit = NvRmPrivAp15VdeMonitorsDeinit;
+    VdeCaps[1].Start = NvRmPrivAp15VdeMonitorsStart;
+    VdeCaps[1].Read = NvRmPrivAp15VdeMonitorsRead;
+
+    ModuleCaps[0].MajorVersion = 1;     // AP15 A01
+    ModuleCaps[0].MinorVersion = 0;
+    ModuleCaps[0].EcoLevel = 0;
+    ModuleCaps[0].Capability = (void*)&VdeCaps[1];
+
+    ModuleCaps[1].MajorVersion = 1;     // AP15 A02 (same caps as AP15 A01)
+    ModuleCaps[1].MinorVersion = 1;
+    ModuleCaps[1].EcoLevel = 0;
+    ModuleCaps[1].Capability = (void*)&VdeCaps[1];
+
+    ModuleCaps[2].MajorVersion = 1;     // AP20 (same caps as AP15 A01)
+    ModuleCaps[2].MinorVersion = 2;
+    ModuleCaps[2].EcoLevel = 0;
+    ModuleCaps[2].Capability = (void*)&VdeCaps[1];
+
+    id = NVRM_MODULE_ID(NvRmModuleID_Vde, 0);
+    err = NvRmModuleGetCapabilities(hRm, id, ModuleCaps, 3, (void**)&pActualCaps);
+    if (err != NvSuccess)
+    {
+        // Get capabilities failed - record "not present" capabilities
+        pActualCaps = &VdeCaps[0];
+    }
+    else if (pActualCaps->DomainMap[NvRmDfsClockId_Vpipe])
+    {
+        pActualCaps->pBaseReg =
+            (pTable->ModInst + pTable->Modules[NvRmModuleID_Vde].Index)->VirtAddr;
+    }
+    pDfs->Modules[NvRmDfsModuleId_Vde] = *pActualCaps;
+    return NvSuccess;
+}
+
+/*
+ * Queries capabilities of the External Memory Controller and records them
+ * in pDfs->Modules[NvRmDfsModuleId_Emc]. The EMC activity monitor is
+ * optional; absence is not an error - a "not present" entry is recorded.
+ */
+static NvError EmcMonitorsGetCapabilities(NvRmDfs* pDfs)
+{
+    NvError err;
+    NvRmModuleID id;
+    NvRmDfsModule* pActualCaps;
+    NvRmDfsModule EmcCaps[3] = {{{0}}};
+    NvRmModuleCapability ModuleCaps[3];
+    NvRmModuleTable* pTable;
+    NvRmDeviceHandle hRm = pDfs->hRm;
+
+    pTable = NvRmPrivGetModuleTable(hRm);
+
+    // EmcCaps[0] = monitor not present; EmcCaps[1]/[2] = AP15/AP20 EMC
+    // monitor routines respectively.
+    EmcCaps[0].DomainMap[NvRmDfsClockId_Emc] = NV_FALSE;
+
+    EmcCaps[1].DomainMap[NvRmDfsClockId_Emc] = NV_TRUE;
+    EmcCaps[1].Init = NvRmPrivAp15EmcMonitorsInit;
+    EmcCaps[1].Deinit = NvRmPrivAp15EmcMonitorsDeinit;
+    EmcCaps[1].Start = NvRmPrivAp15EmcMonitorsStart;
+    EmcCaps[1].Read = NvRmPrivAp15EmcMonitorsRead;
+
+    EmcCaps[2].DomainMap[NvRmDfsClockId_Emc] = NV_TRUE;
+    EmcCaps[2].Init = NvRmPrivAp20EmcMonitorsInit;
+    EmcCaps[2].Deinit = NvRmPrivAp20EmcMonitorsDeinit;
+    EmcCaps[2].Start = NvRmPrivAp20EmcMonitorsStart;
+    EmcCaps[2].Read = NvRmPrivAp20EmcMonitorsRead;
+
+    ModuleCaps[0].MajorVersion = 1;     // AP15 A01
+    ModuleCaps[0].MinorVersion = 0;
+    ModuleCaps[0].EcoLevel = 0;
+    ModuleCaps[0].Capability = (void*)&EmcCaps[1];
+
+    ModuleCaps[1].MajorVersion = 1;     // AP15 A02 (same caps as AP15 A01)
+    ModuleCaps[1].MinorVersion = 1;
+    ModuleCaps[1].EcoLevel = 0;
+    ModuleCaps[1].Capability = (void*)&EmcCaps[1];
+
+    ModuleCaps[2].MajorVersion = 1;     // AP20 EMC
+    ModuleCaps[2].MinorVersion = 2;
+    ModuleCaps[2].EcoLevel = 0;
+    ModuleCaps[2].Capability = (void*)&EmcCaps[2];
+
+    id = NVRM_MODULE_ID(NvRmPrivModuleID_ExternalMemoryController, 0);
+    err = NvRmModuleGetCapabilities(hRm, id, ModuleCaps, 3, (void**)&pActualCaps);
+    if (err != NvSuccess)
+    {
+        // Get capabilities failed - record "not present" capabilities
+        pActualCaps = &EmcCaps[0];
+    }
+    else if (pActualCaps->DomainMap[NvRmDfsClockId_Emc])
+    {
+        pActualCaps->pBaseReg = (pTable->ModInst +
+            pTable->Modules[NvRmPrivModuleID_ExternalMemoryController].Index)->VirtAddr;
+    }
+    pDfs->Modules[NvRmDfsModuleId_Emc] = *pActualCaps;
+    return NvSuccess;
+}
+
+/*
+ * Queries capabilities of all DFS h/w modules in turn,
+ * stopping at the first failure.
+ */
+static NvError DfsGetModulesCapabilities(NvRmDfs* pDfs)
+{
+    NvError err;
+
+    err = SystatMonitorsGetCapabilities(pDfs);
+    if (err != NvSuccess)
+        return err;
+
+    err = VdeMonitorsGetCapabilities(pDfs);
+    if (err != NvSuccess)
+        return err;
+
+    return EmcMonitorsGetCapabilities(pDfs);
+}
+
+/*****************************************************************************/
+// DFS INITIALIZATION PROCEDURES
+/*****************************************************************************/
+
+/*
+ * Initializes DFS scaling parameters for all clock domains: selects
+ * chip-specific algorithm constants, raises minimum frequencies required
+ * by the active download transport, clips everything to h/w clock limits,
+ * and seeds the low/high corner and maximum-frequency shortcuts.
+ */
+static void DfsParametersInit(NvRmDfs* pDfs)
+{
+ NvU32 i;
+ NvRmModuleClockLimits HwLimitsKHz[NvRmDfsClockId_Num];
+ const NvRmModuleClockLimits* pClimits;
+
+ // TODO: ODM query for parameters initialization?
+
+ // Macro to initialize scaling algorithm parameters
+ // (chip selection: AP15/AP16 share one parameter set, AP20 another)
+ #define INIT_PARAM(Domain, DOMAIN) \
+ do \
+ { \
+ if ((pDfs->hRm->ChipId.Id == 0x15) || (pDfs->hRm->ChipId.Id == 0x16)) \
+ { \
+ NvRmDfsParam dp = { NVRM_DFS_PARAM_##DOMAIN##_AP15 }; \
+ pDfs->DfsParameters[NvRmDfsClockId_##Domain] = dp; \
+ } \
+ else if (pDfs->hRm->ChipId.Id == 0x20) \
+ { \
+ NvRmDfsParam dp = { NVRM_DFS_PARAM_##DOMAIN##_AP20 }; \
+ pDfs->DfsParameters[NvRmDfsClockId_##Domain] = dp; \
+ } \
+ else \
+ NV_ASSERT(!"Unsupported chip ID"); \
+ } while(0)
+
+ // Initialize scaling algorithm parameters for DFS domains
+ INIT_PARAM(Cpu, CPU);
+ INIT_PARAM(Avp, AVP);
+ INIT_PARAM(System, SYSTEM);
+ INIT_PARAM(Ahb, AHB);
+ INIT_PARAM(Apb, APB);
+ INIT_PARAM(Vpipe, VPIPE);
+ INIT_PARAM(Emc, EMC);
+
+ #undef INIT_PARAM
+
+ // Update minimum frequency boundary for DFS clocks as required for
+ // download transport support
+ switch (NvRmPrivGetDownloadTransport(pDfs->hRm))
+ {
+ case NvOdmDownloadTransport_Ethernet:
+ // MIO WAR: raise AHB/APB/EMC floors on AP15 A01 only
+ if ((pDfs->hRm->ChipId.Id == 0x15) &&
+ (pDfs->hRm->ChipId.Major == 0x01) &&
+ (pDfs->hRm->ChipId.Minor == 0x01))
+ {
+ pDfs->DfsParameters[NvRmDfsClockId_Apb].MinKHz =
+ pDfs->DfsParameters[NvRmDfsClockId_Ahb].MinKHz =
+ NVRM_ETHERNET_AHB_MIN_KHZ;
+ pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz =
+ NVRM_ETHERNET_EMC_MIN_KHZ;
+ }
+ break;
+ case NvOdmDownloadTransport_Usb:
+ pDfs->DfsParameters[NvRmDfsClockId_Apb].MinKHz =
+ pDfs->DfsParameters[NvRmDfsClockId_Ahb].MinKHz =
+ pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz =
+ NVRM_USB_AHB_MIN_KHZ;
+ break;
+ case NvOdmDownloadTransport_Spi:
+ pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz = NV_MAX(
+ pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz,
+ NVRM_SPI_CPU_MIN_KHZ);
+ if (pDfs->hRm->ChipId.Id == 0x20)
+ pDfs->DfsParameters[NvRmDfsClockId_Apb].MinKHz =
+ NVRM_SPI_APB_MIN_KHZ;
+ break;
+ default:
+ break;
+ }
+
+ // CPU clock H/w limits
+ pClimits = NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu);
+ HwLimitsKHz[NvRmDfsClockId_Cpu] = *pClimits;
+
+ // System clock H/w limits are applied to AVP, AHB, and APB
+ pClimits = NvRmPrivGetSocClockLimits(NvRmPrivModuleID_System);
+ HwLimitsKHz[NvRmDfsClockId_System] = *pClimits;
+ HwLimitsKHz[NvRmDfsClockId_Avp] = *pClimits;
+ HwLimitsKHz[NvRmDfsClockId_Ahb] = *pClimits;
+ HwLimitsKHz[NvRmDfsClockId_Apb] = *pClimits;
+
+ // V-pipe clock H/w limits
+ pClimits = NvRmPrivGetSocClockLimits(NvRmModuleID_Vde);
+ HwLimitsKHz[NvRmDfsClockId_Vpipe] = *pClimits;
+
+ // EMC clock H/w limits (the limit table specifies EMC2x limits); on SoC
+ // PLLM0 is used as a high limit for DFS
+ pClimits =
+ NvRmPrivGetSocClockLimits(NvRmPrivModuleID_ExternalMemoryController);
+ HwLimitsKHz[NvRmDfsClockId_Emc].MaxKHz = pClimits->MaxKHz / 2;
+ HwLimitsKHz[NvRmDfsClockId_Emc].MinKHz = pClimits->MinKHz / 2;
+ if (s_Platform == ExecPlatform_Soc)
+ {
+ HwLimitsKHz[NvRmDfsClockId_Emc].MaxKHz =
+ NvRmPrivGetClockSourceFreq(NvRmClockSource_PllM0) / 2;
+ }
+
+ // Clip requested clock boundaries to h/w limits, and initialize
+ // low/high corner with minimum/maximum domain frequencies
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ if (pDfs->DfsParameters[i].MaxKHz > HwLimitsKHz[i].MaxKHz)
+ pDfs->DfsParameters[i].MaxKHz = HwLimitsKHz[i].MaxKHz;
+ if (pDfs->DfsParameters[i].MinKHz < HwLimitsKHz[i].MinKHz)
+ pDfs->DfsParameters[i].MinKHz = HwLimitsKHz[i].MinKHz;
+ pDfs->LowCornerKHz.Domains[i] = pDfs->DfsParameters[i].MinKHz;
+ pDfs->HighCornerKHz.Domains[i] = pDfs->DfsParameters[i].MaxKHz;
+ }
+ pDfs->CpuCornersShadow.MinKHz =
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ pDfs->CpuCornersShadow.MaxKHz =
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu];
+
+#if NVRM_EMC_DFS_DEFAULT_DISABLED
+ // Pin the EMC low corner at maximum, effectively disabling EMC scaling
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Emc] =
+ pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz;
+#endif
+ pDfs->CpuEnvelopeSet = NV_FALSE;
+ pDfs->EmcEnvelopeSet = NV_FALSE;
+
+ // Set initial boundaries for sampling interval
+ pDfs->SamplingWindow.MinIntervalMs = NVRM_DFS_MIN_SAMPLE_MS;
+ pDfs->SamplingWindow.MaxIntervalMs = NVRM_DFS_MAX_SAMPLE_MS;
+ if (pDfs->hRm->ChipId.Id == 0x20) // constant for AP20 (TODO: revisit)
+ pDfs->SamplingWindow.MaxIntervalMs = NVRM_DFS_MIN_SAMPLE_MS;
+
+ // Fill in maximum DFS domains frequencies shortcut
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ pDfs->MaxKHz.Domains[i] = pDfs->DfsParameters[i].MaxKHz;
+}
+
+/*
+ * Initializes DFS samplers for the given starting frequencies: clears low
+ * power corner indicators, pre-fills one full sampling window (at minimum
+ * interval), and seeds each domain sampler as if the current frequency had
+ * been constant for the whole window before DFS start.
+ */
+static void DfsSamplersInit(
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfs* pDfs)
+{
+ NvU32 i, j, msec;
+ NvRmDfsSampleWindow* pSampleWindow;
+
+ /*
+ * Clear Low Power Corner indicators, initialize current
+ * and target frequencies
+ */
+ pDfs->LowCornerHit = NV_FALSE;
+ pDfs->LowCornerReport = NV_FALSE;
+ NvRmPrivUpdateDfsPauseFlag(pDfs->hRm, NV_FALSE);
+
+ pDfs->CurrentKHz = *pDfsKHz;
+ pDfs->TargetKHz = pDfs->CurrentKHz;
+
+ /*
+ * Initialize one full sampling window before DFS start. Use minimum
+ * sampling interval.
+ */
+ pSampleWindow = &pDfs->SamplingWindow;
+ msec = pSampleWindow->MinIntervalMs;
+ pSampleWindow->NextIntervalMs = msec;
+ for (j = 0; j < NVRM_DFS_MAX_SAMPLES; j++)
+ {
+ pSampleWindow->IntervalsMs[j] = msec;
+ }
+ pSampleWindow->pLastInterval = pSampleWindow->IntervalsMs;
+ // Window total = interval * 2^NVRM_DFS_MAX_SAMPLES_LOG2 samples
+ pSampleWindow->SampleWindowMs = (msec << NVRM_DFS_MAX_SAMPLES_LOG2);
+ pSampleWindow->BusyCheckLastUs = 0;
+ pSampleWindow->BusyCheckDelayUs = 0;
+
+ /*
+ * Initialize domain samplers
+ */
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ NvRmFreqKHz khz = pDfs->CurrentKHz.Domains[i];
+ NvRmDfsSampler* pSampler = &pDfs->Samplers[i];
+ NvU32 cycles = khz * msec;
+
+ // Clear busy boost
+ pDfs->BusyKHz.Domains[i] = 0;
+
+ // Store DFS Clock Id
+ pSampler->ClockId = i;
+
+ // Use modules capabilities to determine if domain monitor is present
+ for (j = 1; j < NvRmDfsModuleId_Num; j++)
+ {
+ pSampler->MonitorPresent |=
+ pDfs->Modules[j].DomainMap[i];
+ }
+
+ // Initialize sampler data assuming constant current frequency
+ // for one sampling window before the DFS start
+ for (j = 0; j < NVRM_DFS_MAX_SAMPLES; j++)
+ {
+ pSampler->Cycles[j] = cycles;
+ }
+ pSampler->pLastSample = pSampler->Cycles;
+ pSampler->TotalActiveCycles = (cycles << NVRM_DFS_MAX_SAMPLES_LOG2);
+ if (pSampler->MonitorPresent)
+ {
+ pSampler->AverageKHz = khz;
+ pSampler->BumpedAverageKHz = khz;
+ }
+ else
+ {
+ // For domain without monitor, average frequency is unspecified
+ // and low corner is used as a base for target calculation
+ pSampler->AverageKHz = NvRmFreqUnspecified;
+ pSampler->BumpedAverageKHz = pDfs->LowCornerKHz.Domains[i];
+ }
+ pSampler->NrtSampleCounter = 0;
+ pSampler->NrtStarveBoostKHz = 0;
+ pSampler->RtStarveBoostKHz = 0;
+ pSampler->BusyPulseMode = NV_FALSE;
+ }
+}
+
+/*
+ * Initializes all DFS h/w monitors: maps the microsecond timer and invokes
+ * the Init hook of every registered module, reporting the first failure.
+ */
+static NvError DfsHwInit(NvRmDfs* pDfs)
+{
+    NvU32 i;
+
+    s_pTimerUs = NvRmPrivAp15GetTimerUsVirtAddr(pDfs->hRm);
+
+    for (i = 1; i < NvRmDfsModuleId_Num; i++)
+    {
+        if (pDfs->Modules[i].Init)
+        {
+            NvError err = pDfs->Modules[i].Init(pDfs);
+            if (err != NvSuccess)
+                return err;
+        }
+    }
+    return NvSuccess;
+}
+
+/*
+ * Deinitializes all DFS h/w monitors via the per-module Deinit hooks.
+ * Safe to call when DFS was never attached to an RM device.
+ */
+static void DfsHwDeinit(NvRmDfs* pDfs)
+{
+    NvU32 i;
+
+    if ((pDfs == NULL) || (pDfs->hRm == NULL))
+        return;
+
+    for (i = 1; i < NvRmDfsModuleId_Num; i++)
+    {
+        if (pDfs->Modules[i].Deinit)
+            pDfs->Modules[i].Deinit(pDfs);
+    }
+}
+
+/*****************************************************************************/
+// DFS ALGORITHM IMPLEMENTATION
+/*****************************************************************************/
+
+/*
+ * Starts the activity monitor of every DFS module that provides a Start
+ * hook, for the next sampling interval.
+ */
+static void
+DfsStartMonitors(
+    const NvRmDfs* pDfs,
+    const NvRmDfsFrequencies* pDfsKHz,
+    const NvU32 IntervalMs)
+{
+    NvU32 i;
+
+    for (i = 1; i < NvRmDfsModuleId_Num; i++)
+    {
+        if (pDfs->Modules[i].Start)
+            pDfs->Modules[i].Start(pDfs, pDfsKHz, IntervalMs);
+    }
+}
+
+/*
+ * Collects idle readings from every DFS module that provides a Read hook,
+ * accumulating results in pIdleData.
+ */
+static void
+DfsReadMonitors(
+    const NvRmDfs* pDfs,
+    const NvRmDfsFrequencies* pDfsKHz,
+    NvRmDfsIdleData* pIdleData)
+{
+    NvU32 i;
+
+    for (i = 1; i < NvRmDfsModuleId_Num; i++)
+    {
+        if (pDfs->Modules[i].Read)
+            pDfs->Modules[i].Read(pDfs, pDfsKHz, pIdleData);
+    }
+}
+
+/*
+ * Determines the PM thread request for CPU state control. Only the AP20
+ * path generates requests; all other chips return "no request".
+ */
+static NvRmPmRequest
+DfsGetPmRequest(
+    NvRmDeviceHandle hRmDevice,
+    NvRmDfsSampler* pCpuSampler,
+    NvRmFreqKHz CpuKHz)
+{
+    if (hRmDevice->ChipId.Id != 0x20)
+        return NvRmPmRequest_None;
+
+    return NvRmPrivAp20GetPmRequest(hRmDevice, pCpuSampler, CpuKHz);
+}
+
+static NvBool
+DfsGetTargetFrequencies(
+ const NvRmDfsIdleData* pIdleData,
+ NvRmDfs* pDfs,
+ NvRmDfsFrequencies* pDfsKHz)
+{
+ static NvRmDfsFrequencies LastKHz = {{0}};
+
+ NvU32 i;
+ NvBool BusyCheckTime;
+ NvBool ReturnValue = NV_FALSE;
+ NvBool LowCornerHit = NV_TRUE;
+ NvU32 msec = pIdleData->CurrentIntervalMs;
+ NvU32 usec = NvRmPrivGetUs();
+
+ // Add current sample interval to sampling window; always signal to clock
+ // control thread if window wraparound; check busy hints expirtaion time
+ ReturnValue = AddSampleInterval(&pDfs->SamplingWindow, msec);
+ pDfs->SamplingWindow.SampleCnt++;
+ BusyCheckTime = pDfs->SamplingWindow.BusyCheckDelayUs <
+ (usec - pDfs->SamplingWindow.BusyCheckLastUs);
+
+ // Update thermal throttling polling control
+ if (!NVRM_DTT_DISABLED && pDfs->ThermalThrottler.hOdmTcore)
+ {
+ if (pDfs->ThermalThrottler.RdIntervalUs <
+ (usec - pDfs->ThermalThrottler.RdTimeUs))
+ {
+ pDfs->ThermalThrottler.RdTimeUs = usec;
+ pDfs->ThermalThrottler.UpdateFlag = NV_TRUE;
+ }
+ }
+
+ // Update cumulative log time (including LP2 time)
+ if (s_DfsLogOn)
+ {
+ pDfs->SamplingWindow.CumulativeLogMs +=
+ (pIdleData->CurrentIntervalMs + pIdleData->Lp2TimeMs);
+ if (pIdleData->Lp2TimeMs)
+ {
+ pDfs->SamplingWindow.CumulativeLp2TimeMs += pIdleData->Lp2TimeMs;
+ pDfs->SamplingWindow.CumulativeLp2Entries++;
+ }
+ }
+ // Update LP2 indicator to synchronize DVFS state with dedicated CPU
+ // rail after LP2 exit (required if CPU rail returns to default level
+ // by PMU underneath DVFS on every LP2 exit)
+ if (pIdleData->Lp2TimeMs && pDfs->VoltageScaler.VCpuOTPOnWakeup &&
+ NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ {
+ pDfs->VoltageScaler.Lp2SyncOTPFlag = NV_TRUE;
+ pDfs->VoltageScaler.UpdateFlag = NV_TRUE;
+ }
+
+ // Determine target frequency for each DFS domain
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ NvRmDfsSampler* pDomainSampler = &pDfs->Samplers[i];
+ NvRmDfsParam* pDomainParam = &pDfs->DfsParameters[i];
+ NvRmFreqKHz* pDomainKHz = &pDfsKHz->Domains[i];
+ NvRmFreqKHz CurrentDomainKHz = *pDomainKHz;
+ NvRmFreqKHz LowCornerDomainKHz = pDfs->LowCornerKHz.Domains[i];
+ NvRmFreqKHz HighCornerDomainKHz = pDfs->HighCornerKHz.Domains[i];
+ NvRmFreqKHz DomainBusyKHz = pDfs->BusyKHz.Domains[i]; // from dfs thread
+
+ /*
+ * Find and adjust average activity frequency over the sampling
+ * window
+ */
+ if (pDomainSampler->MonitorPresent)
+ {
+ NvU32 IdleCount = pIdleData->Readings[i];
+ NvU32 ActiveCount = msec * CurrentDomainKHz; // max if never idle
+
+ // Update cumulative number of cycles
+ if (s_DfsLogOn)
+ pDomainSampler->CumulativeLogCycles +=
+ (ActiveCount + pIdleData->Lp2TimeMs * CurrentDomainKHz);
+
+ // Raw average = Sum(Activity Counts within sampling window)
+ // divided by Sum(Sampling Intervals within sampling window)
+ ActiveCount =
+ (ActiveCount > IdleCount) ? (ActiveCount - IdleCount) : (0);
+#if NVRM_DFS_STALL_AVERAGE_IN_BUSY_PULSE
+ if (!pDomainSampler->BusyPulseMode)
+#endif
+ {
+ AddActivitySample(pDomainSampler, ActiveCount);
+ }
+
+ pDomainSampler->AverageKHz = (NvU32)NvDiv64(pDomainSampler->TotalActiveCycles,
+ pDfs->SamplingWindow.SampleWindowMs);
+
+ // Check non real-time starvation
+ if ((IdleCount >= (1 + (ActiveCount >> pDomainParam->RelAdjustBits))) &&
+ (pDomainSampler->BumpedAverageKHz >= pDomainSampler->AverageKHz))
+ {
+ pDomainSampler->NrtSampleCounter = 0;
+ if (pDomainSampler->NrtStarveBoostKHz != 0)
+ {
+ // Domain is not starving, previously added boost has not been
+ // removed, yet - decrease starvation boost proportionally
+ pDomainSampler->NrtStarveBoostKHz = (pDomainSampler->NrtStarveBoostKHz *
+ ((0x1 << BOOST_FRACTION_BITS) - pDomainParam->NrtStarveParam.BoostDecKoef))
+ >> BOOST_FRACTION_BITS;
+ }
+ }
+ else if (pDomainSampler->NrtSampleCounter < pDomainParam->MinNrtSamples)
+ {
+ pDomainSampler->NrtSampleCounter++;
+ }
+ else
+ {
+ // Domain is starving - increase starvation boost
+ // (proportionally plus a fixed step)
+ pDomainSampler->NrtStarveBoostKHz = ((pDomainSampler->NrtStarveBoostKHz *
+ ((0x1 << BOOST_FRACTION_BITS) + pDomainParam->NrtStarveParam.BoostIncKoef))
+ >> BOOST_FRACTION_BITS) + pDomainParam->NrtStarveParam.BoostStepKHz;
+
+ // Make sure the boost value is within domain limits
+ if (pDomainSampler->NrtStarveBoostKHz > pDomainParam->MaxKHz)
+ pDomainSampler->NrtStarveBoostKHz = pDomainParam->MaxKHz;
+ }
+
+ // Average frequency change is recognized by DFS only if it exceeds
+ // tolerance band.
+ if ((pDomainSampler->AverageKHz + pDomainParam->LowerBandKHz) <
+ pDomainSampler->BumpedAverageKHz)
+ {
+ pDomainSampler->BumpedAverageKHz =
+ pDomainSampler->AverageKHz + pDomainParam->LowerBandKHz;
+ }
+ else if (pDomainSampler->AverageKHz >
+ (pDomainSampler->BumpedAverageKHz + pDomainParam->UpperBandKHz))
+ {
+ pDomainSampler->BumpedAverageKHz =
+ pDomainSampler->AverageKHz - pDomainParam->UpperBandKHz;
+ }
+
+ // Adjust average frequency up, to probe non real-time starvation
+ pDomainSampler->BumpedAverageKHz +=
+ (pDomainSampler->BumpedAverageKHz >> pDomainParam->RelAdjustBits);
+ }
+ else
+ {
+ // For domain without monitor average frequency is unspecified
+ // and low corner is used as a base for target clalculation
+ pDomainSampler->AverageKHz = NvRmFreqUnspecified;
+ pDomainSampler->BumpedAverageKHz = LowCornerDomainKHz;
+ }
+
+ /*
+ * Check real time starvation
+ */
+ if(NvRmPrivDfsIsStarving(i))
+ {
+ // Domain is starving - increase starvation boost (proportionally
+ // plus a fixed step)
+ pDomainSampler->RtStarveBoostKHz = ((pDomainSampler->RtStarveBoostKHz *
+ ((0x1 << BOOST_FRACTION_BITS) + pDomainParam->RtStarveParam.BoostIncKoef))
+ >> BOOST_FRACTION_BITS) + pDomainParam->RtStarveParam.BoostStepKHz;
+
+ // Make sure the boost value is within domain limits
+ if (pDomainSampler->RtStarveBoostKHz > pDomainParam->MaxKHz)
+ pDomainSampler->RtStarveBoostKHz = pDomainParam->MaxKHz;
+ }
+ else if (pDomainSampler->RtStarveBoostKHz != 0)
+ {
+ // Domain is not starving, previously added boost has not been
+ // removed, yet - decrease starvation boost proportionally
+ pDomainSampler->RtStarveBoostKHz = (pDomainSampler->RtStarveBoostKHz *
+ ((0x1 << BOOST_FRACTION_BITS) - pDomainParam->RtStarveParam.BoostDecKoef))
+ >> BOOST_FRACTION_BITS;
+ }
+
+ /*
+ * Combine average, starvation and busy demands into target frequency,
+ * and clip it to the domain limits. Check low power corner hit. Set
+ * return value if clock update is necessary.
+ */
+ if (pDomainSampler->RtStarveBoostKHz >= pDomainSampler->NrtStarveBoostKHz)
+ {
+ *pDomainKHz = pDomainSampler->BumpedAverageKHz +
+ pDomainSampler->RtStarveBoostKHz;
+ }
+ else
+ {
+ *pDomainKHz = pDomainSampler->BumpedAverageKHz +
+ pDomainSampler->NrtStarveBoostKHz;
+ }
+ if ((*pDomainKHz) < DomainBusyKHz)
+ {
+ (*pDomainKHz) = DomainBusyKHz;
+ }
+ if ((*pDomainKHz) < LowCornerDomainKHz)
+ {
+ *pDomainKHz = LowCornerDomainKHz;
+ }
+ if ((*pDomainKHz) > HighCornerDomainKHz)
+ {
+ *pDomainKHz = HighCornerDomainKHz;
+ }
+
+ /*
+ * Domain frequency is above low limit with tolerance band, or frequency
+ * is above low limit and it was not in low corner (hysteresis) - clear
+ * low corner hit
+ */
+ if ( ((*pDomainKHz) >
+ (LowCornerDomainKHz + pDomainParam->UpperBandKHz)) ||
+ (((*pDomainKHz) > LowCornerDomainKHz) && (!pDfs->LowCornerHit))
+ )
+ {
+ LowCornerHit = NV_FALSE;
+ }
+
+ /*
+ * Update PM request. Set return value if CPU power state change
+ * is requested.
+ */
+ if (i == NvRmDfsClockId_Cpu)
+ {
+ NvRmPmRequest r =
+ DfsGetPmRequest(pDfs->hRm, pDomainSampler, *pDomainKHz);
+ if (r != NvRmPmRequest_None)
+ {
+ pDfs->PmRequest = r;
+ ReturnValue = NV_TRUE;
+ }
+ }
+
+ // Set return value, if the new target is outside the tolerance band
+ // around the last recorded target, or if domain is busy
+ ReturnValue = ReturnValue || (DomainBusyKHz && BusyCheckTime) ||
+ (((*pDomainKHz) + pDomainParam->LowerBandKHz) <= LastKHz.Domains[i]) ||
+ ((*pDomainKHz) >= (LastKHz.Domains[i] + pDomainParam->UpperBandKHz));
+ }
+ // Update low corner hit status if necessary
+ if (pDfs->LowCornerHit != LowCornerHit)
+ {
+ pDfs->LowCornerHit = LowCornerHit;
+ pDfs->LowCornerReport = NV_TRUE;
+ ReturnValue = NV_TRUE;
+ }
+ // Update last recorded target if clock thread is to be signaled
+ if (ReturnValue)
+ {
+ LastKHz = *pDfsKHz;
+ }
+ return ReturnValue;
+}
+
+static NvBool
+AddSampleInterval(
+ NvRmDfsSampleWindow* pSampleWindow,
+ NvU32 IntervalMs)
+{
+ /*
+ * Add current sampling interval to the sampling window (i.e., replace the
+ * first/"oldest" interval with the new one and update window size).
+ */
+ NvBool WrapAround = NV_FALSE;
+
+ NvU32* pFirst = pSampleWindow->pLastInterval + 1;
+ if (pFirst >= &pSampleWindow->IntervalsMs[
+ NV_ARRAY_SIZE(pSampleWindow->IntervalsMs)])
+ {
+ pFirst = pSampleWindow->IntervalsMs;
+ WrapAround = NV_TRUE;
+ }
+ pSampleWindow->pLastInterval = pFirst;
+
+ pSampleWindow->SampleWindowMs += IntervalMs;
+ pSampleWindow->SampleWindowMs -= (*pFirst);
+ *pFirst = IntervalMs;
+
+ return WrapAround;
+}
+
+static void
+AddActivitySample(
+ NvRmDfsSampler* pDomainSampler,
+ NvU32 ActiveCount)
+{
+ /*
+ * Add new activity sample to the cicular buffer(i.e., replace the
+ * first/"oldest" sample with the new one) and update total cycle count
+ */
+ NvU32* pFirst = pDomainSampler->pLastSample + 1;
+ if (pFirst >= &pDomainSampler->Cycles[
+ NV_ARRAY_SIZE(pDomainSampler->Cycles)])
+ {
+ pFirst = pDomainSampler->Cycles;
+ }
+ pDomainSampler->pLastSample = pFirst;
+
+ pDomainSampler->TotalActiveCycles += ActiveCount;
+ pDomainSampler->TotalActiveCycles -= (*pFirst);
+ *pFirst = ActiveCount;
+}
+
/*
 * DFS activity monitor interrupt handler.
 *
 * Reads the idle counts from the h/w monitors (which clears the DFS
 * interrupt), re-starts the monitors for the next interval, runs the DFS
 * algorithm to compute new target frequencies, and signals the clock
 * control thread when clocks should actually be changed.
 *
 * args - NvRmDfs* passed at interrupt registration time.
 */
static void DfsIsr(void* args)
{
    NvRmDfs* pDfs = (NvRmDfs*)args;
    NvBool ClockChange = NV_FALSE;
    NvRmDfsFrequencies DfsKHz;
    NvRmDfsIdleData IdleData;
    NvU32 msec;

    DfsProfileStart(pDfs, NvRmDfsProfileId_Isr);
    NvOsIntrMutexLock(pDfs->hIntrMutex);
    DfsProfileStart(pDfs, NvRmDfsProfileId_Algorithm);

    // Input to DFS algorithm from clock control thread: current frequencies
    DfsKHz = pDfs->CurrentKHz;

    // Adjust next sampling interval based on CPU domain frequency; keep it
    // minimum if NRT threshold was crossed during the last sample
    msec = pDfs->SamplingWindow.MinIntervalMs;
    if (pDfs->Samplers[NvRmDfsClockId_Cpu].NrtSampleCounter == 0)
    {
        // CPU near its low limit and not starving - sample less often
        if (DfsKHz.Domains[NvRmDfsClockId_Cpu] <
            (pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz +
             pDfs->DfsParameters[NvRmDfsClockId_Cpu].UpperBandKHz))
            msec = pDfs->SamplingWindow.MaxIntervalMs;
    }
    pDfs->SamplingWindow.NextIntervalMs = msec;

    // Read idle counts from monitors, which clears DFS interrupt
    DfsReadMonitors(pDfs, &DfsKHz, &IdleData);

    if (pDfs->DfsRunState > NvRmDfsRunState_Stopped)
    {
        // If DFS is running re-start monitors, execute DFS algorithm, and
        // determine new target frequencies for the clock control thread
        DfsStartMonitors(pDfs, &DfsKHz, msec);
        ClockChange = DfsGetTargetFrequencies(&IdleData, pDfs, &DfsKHz);
        pDfs->TargetKHz = DfsKHz;
        // Pending voltage or thermal updates also require thread wakeup
        ClockChange = ClockChange || pDfs->VoltageScaler.UpdateFlag ||
            pDfs->ThermalThrottler.UpdateFlag;
    }
    DfsProfileSample(pDfs, NvRmDfsProfileId_Algorithm);
    DfsLogEnter(pDfs, IdleData.Lp2TimeMs);
    NvOsIntrMutexUnlock(pDfs->hIntrMutex);

    // Signal clock control thread if clocks should be changed
    if (ClockChange)
    {
        NvOsSemaphoreSignal(pDfs->hSemaphore);
    }
    DfsProfileSample(pDfs, NvRmDfsProfileId_Isr);

    NvRmInterruptDone(pDfs->DfsInterruptHandle);
}
+
+/*****************************************************************************/
+// DFS CLOCK CONTROL THREAD
+/*****************************************************************************/
+
/*
 * DFS clock control handler: executes one pass of the clock control
 * loop. Waits for a signal from the DFS ISR (or an API call), then
 * processes power events, busy hints, thermal throttling, and commits
 * new DFS frequency targets to hardware. Returns the pending power
 * management request (NvRmPmRequest_ExitFlag when thread abort was
 * requested, NvRmPmRequest_None otherwise).
 */
static NvRmPmRequest DfsThread(NvRmDfs* pDfs)
{
    // Last frequency targets committed to h/w (persists across passes)
    static NvRmDfsFrequencies LastKHz = {{0}};

    NvRmPowerEvent PowerEvent;
    NvRmDfsRunState DfsRunState;
    NvRmDfsFrequencies DfsKHz, HighKHz;
    NvBool LowCornerHit, LowCornerReport, NeedClockUpdate;
    NvU32 i, BusyCheckDelayMs;

    NvRmPmRequest PmRequest = NvRmPmRequest_None;

    // Thread has been initialized
    pDfs->InitializedThread = NV_TRUE;

    // CLOCK CONTROL EXECUTION LOOP //
    /********************************/
    // NOTE(review): the braces below enclose a single pass of the loop;
    // the caller is expected to invoke this function repeatedly.
    {
        NvOsSemaphoreWait(pDfs->hSemaphore);
        if (pDfs->AbortThread)
        {
            // Acknowledge abort request and ask the caller to exit
            pDfs->AbortThread = NV_FALSE;
            return NvRmPmRequest_ExitFlag;
        }
        DfsProfileStart(pDfs, NvRmDfsProfileId_Control);

        // Save target frequency and DFS state variables, updated by ISR
        NvOsIntrMutexLock(pDfs->hIntrMutex);
        DfsKHz = pDfs->TargetKHz;
        HighKHz = pDfs->HighCornerKHz;
        DfsRunState = pDfs->DfsRunState;
        LowCornerHit = pDfs->LowCornerHit;
        LowCornerReport = pDfs->LowCornerReport;
        pDfs->LowCornerReport = NV_FALSE;
        PmRequest = pDfs->PmRequest;
        pDfs->PmRequest = NvRmPmRequest_None;
        NvOsIntrMutexUnlock(pDfs->hIntrMutex);

        /*
         * On exit from low power state re-initialize DFS h/w, samplers, and
         * start monitors provided DFS is running. If DFS is stopped just get
         * DFS h/w ready.
         */
        NV_ASSERT_SUCCESS(NvRmPowerGetEvent(
            pDfs->hRm, pDfs->PowerClientId, &PowerEvent));
        if (PowerEvent != NvRmPowerEvent_NoEvent)
        {
            // Full h/w re-initialization after LP0
            if (PowerEvent == NvRmPowerEvent_WakeLP0)
            {
                DfsHwDeinit(pDfs);
                NV_ASSERT_SUCCESS(DfsHwInit(pDfs));
            }
            // Re-initialize samplers if DVFS was running, but stopped on
            // entry to LPx; keep sampling history, if DVFS was not stopped;
            // restart monitors in either case
            NvRmPrivLockSharedPll();
            if (pDfs->DfsLPxSavedState > NvRmDfsRunState_Stopped)
            {
                DfsClockFreqGet(pDfs->hRm, &DfsKHz);

                NvOsIntrMutexLock(pDfs->hIntrMutex);
                if (pDfs->DfsRunState <= NvRmDfsRunState_Stopped)
                {
                    pDfs->DfsRunState = pDfs->DfsLPxSavedState;
                    DfsSamplersInit(&DfsKHz, pDfs);
                }
                NV_ASSERT(pDfs->DfsRunState == pDfs->DfsLPxSavedState);
                pDfs->CurrentKHz = DfsKHz;
                DfsStartMonitors(
                    pDfs, &DfsKHz, pDfs->SamplingWindow.MinIntervalMs);
                NvOsIntrMutexUnlock(pDfs->hIntrMutex);
            }
            NvRmPrivDvsRun(); // enable v-scaling even if DFS is stopped
            NvRmPrivUnlockSharedPll();
            return PmRequest;
        }

        /*
         * Advance busy hint state machine if DFS thread has been signaled by
         * synchronous busy hint.
         */
        if (pDfs->BusySyncState == NvRmDfsBusySyncState_Signal)
        {
            pDfs->BusySyncState = NvRmDfsBusySyncState_Execute;
            pDfs->VoltageScaler.UpdateFlag = NV_TRUE;
        }

        /*
         * When DFS is running evaluate busy boost and low corner status;
         * check if new target frequencies are significantly different from
         * the previously targeted.
         */
        if (DfsRunState > NvRmDfsRunState_Stopped)
        {
            NeedClockUpdate = NV_FALSE;
            BusyCheckDelayMs = NVRM_DFS_BUSY_PURGE_MS;

            // Domain 0 is skipped; domains start at index 1
            for (i = 1; i < NvRmDfsClockId_Num; i++)
            {
                NvRmFreqKHz NewBusyKHz;
                NvBool NewPulseMode;
                NvU32 delay;
                NvRmFreqKHz OldBusyKHz = pDfs->BusyKHz.Domains[i];
                NvBool OldBusyPulseMode = pDfs->Samplers[i].BusyPulseMode;
                NvRmPrivDfsGetBusyHint(i, &NewBusyKHz, &NewPulseMode, &delay);

                if ((NewBusyKHz != 0) || (OldBusyKHz != 0))
                {
                    // When busy boost decreasing re-init average to the
                    // boosted level
                    if (NewBusyKHz < OldBusyKHz)
                    {
                        if (!OldBusyPulseMode)
                        {
                            NvU32 AverageKHz = OldBusyKHz - (OldBusyKHz / (1 +
                                (0x1 << pDfs->DfsParameters[i].RelAdjustBits)));
                            NvOsIntrMutexLock(pDfs->hIntrMutex);
                            DfsSetAverageUp(i, AverageKHz, pDfs);
                            NvOsIntrMutexUnlock(pDfs->hIntrMutex);
                        }
                        // Make sure new frequency to be set is above max busy
                        // and update DFS object
                        if (DfsKHz.Domains[i] < OldBusyKHz)
                        {
                            DfsKHz.Domains[i] = OldBusyKHz;
                        }
                    }
                    else
                    {
                        // Make sure new frequency to be set is above max busy
                        // and update DFS object
                        if (DfsKHz.Domains[i] < NewBusyKHz)
                        {
                            DfsKHz.Domains[i] = NewBusyKHz;
                        }
                    }
                    // Clip new dfs target to high domain corner
                    if (DfsKHz.Domains[i] > HighKHz.Domains[i])
                    {
                        DfsKHz.Domains[i] = HighKHz.Domains[i];
                    }
                    pDfs->BusyKHz.Domains[i] = NewBusyKHz;
                    pDfs->Samplers[i].BusyPulseMode = NewPulseMode;
                    if (BusyCheckDelayMs > delay)
                        BusyCheckDelayMs = delay; // Min delay to next check
                }
                // Compare new domain target with the previous one - need clock
                // update if they differ significantly
                NeedClockUpdate = NeedClockUpdate ||
                    ((DfsKHz.Domains[i] + pDfs->DfsParameters[i].LowerBandKHz) <= LastKHz.Domains[i]) ||
                    (DfsKHz.Domains[i] >= (LastKHz.Domains[i] + pDfs->DfsParameters[i].UpperBandKHz));
            }
            // Make sure busy hints will be checked in time
            pDfs->SamplingWindow.BusyCheckLastUs = NvRmPrivGetUs();
            pDfs->SamplingWindow.BusyCheckDelayUs = BusyCheckDelayMs * 1000;

            // Low corner report
            if (LowCornerReport)
            {
                NVRM_DFS_PRINTF(("DFS got %s low corner\n",
                                 (LowCornerHit ? "into" : "out of")));
                NvRmPrivUpdateDfsPauseFlag(pDfs->hRm, LowCornerHit);
            }
        }
        else
        {
            // DFS is stopped - thread is signaled by API, always update clock
            NeedClockUpdate = NV_TRUE;
        }

        // Configure DFS clocks and update current frequencies if necessary
        // (do not touch clocks and voltage if DVS is stopped)
        if (NeedClockUpdate || pDfs->VoltageScaler.UpdateFlag ||
            pDfs->ThermalThrottler.UpdateFlag)
        {
            NvRmPrivLockSharedPll();
            if (!pDfs->VoltageScaler.StopFlag)
            {
                // Check temperature and throttle DFS clocks if necessary. Make
                // sure V/F scaling is running while throttling is in progress.
                pDfs->VoltageScaler.UpdateFlag =
                    DttClockUpdate(pDfs, &pDfs->ThermalThrottler, &DfsKHz);
                LastKHz = DfsKHz;
                // Retry until configuration succeeds, restoring the
                // requested targets before each attempt
                for (;;)
                {
                    if (DfsClockConfigure(pDfs->hRm, &pDfs->MaxKHz, &DfsKHz))
                        break;
                    DfsKHz = LastKHz;
                }

                NvOsIntrMutexLock(pDfs->hIntrMutex);
                pDfs->CurrentKHz = DfsKHz;
                NvOsIntrMutexUnlock(pDfs->hIntrMutex);
            }
            NvRmPrivUnlockSharedPll();

            // Complete synchronous busy hint processing.
            if (pDfs->BusySyncState == NvRmDfsBusySyncState_Execute)
            {
                pDfs->BusySyncState = NvRmDfsBusySyncState_Idle;
                NvOsSemaphoreSignal(pDfs->hSyncBusySemaphore);
            }
        }
        DfsProfileSample(pDfs, NvRmDfsProfileId_Control);
    }
    if (PmRequest != NvRmPmRequest_None)
    {
        NVRM_DFS_PRINTF(("PM request: 0x%x\n", PmRequest));
    }
    return PmRequest;
}
+
+static void DfsThreadTerminate(NvRmDfs* pDfs)
+{
+ /*
+ * Request thread abort, signal semaphore to make sure the thread is
+ * awaken and wait for its self-termination. Do nothing if invalid DFS
+ * structure
+ */
+ if (pDfs)
+ {
+ if (pDfs->hSemaphore && pDfs->InitializedThread)
+ {
+ pDfs->AbortThread = NV_TRUE;
+ NvOsSemaphoreSignal(pDfs->hSemaphore);
+ for (;;)
+ {
+ if (!pDfs->AbortThread)
+ {
+ break;
+ }
+ NvOsSleepMS(10);
+ }
+ }
+ }
+}
+
+static void
+DfsSetAverageUp(
+ NvRmDfsClockId ClockId,
+ NvRmFreqKHz AverageKHz,
+ NvRmDfs* pDfs)
+{
+ NvRmDfsSampler* pDomainSampler = &pDfs->Samplers[ClockId];
+
+ // Update monitored domain average frequency up
+ if ((pDomainSampler->MonitorPresent) &&
+ (pDomainSampler->AverageKHz < AverageKHz))
+ {
+ NvU32 cycles, j;
+ NvU64 NewTotalCycles =
+ (NvU64)AverageKHz * pDfs->SamplingWindow.SampleWindowMs;
+ cycles = (NvU32)(NewTotalCycles >> NVRM_DFS_MAX_SAMPLES_LOG2);
+ for (j = 0; j < NV_ARRAY_SIZE(pDomainSampler->Cycles); j++)
+ {
+ pDomainSampler->Cycles[j] = cycles;
+ }
+ pDomainSampler->TotalActiveCycles = NewTotalCycles;
+ pDomainSampler->AverageKHz = AverageKHz;
+ }
+}
+
+static void
+DfsClockFreqGet(
+ NvRmDeviceHandle hRmDevice,
+ NvRmDfsFrequencies* pDfsKHz)
+{
+ NvU32 i;
+
+ switch (s_Platform)
+ {
+ case ExecPlatform_Soc:
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ NvRmPrivAp15DfsClockFreqGet(hRmDevice, pDfsKHz);
+ else if (hRmDevice->ChipId.Id == 0x20)
+ NvRmPrivAp20DfsClockFreqGet(hRmDevice, pDfsKHz);
+ else
+ NV_ASSERT(!"Unsupported chip ID");
+ break;
+
+ case ExecPlatform_Fpga:
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ // Set fixed FPGA frequency (default: AP15 FPGA)
+ if (hRmDevice->ChipId.Id == 0x20)
+ pDfsKHz->Domains[i] = AP20_FPGA_FREQ;
+ else
+ pDfsKHz->Domains[i] = AP15_FPGA_FREQ;
+ }
+ break;
+
+ default:
+ NV_ASSERT(!"Not supported execution platform for DFS");
+ }
+}
+
+static NvBool
+DfsClockConfigure(
+ NvRmDeviceHandle hRmDevice,
+ const NvRmDfsFrequencies* pMaxKHz,
+ NvRmDfsFrequencies* pDfsKHz)
+{
+ NvU32 i;
+
+ switch (s_Platform)
+ {
+ case ExecPlatform_Soc:
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ return NvRmPrivAp15DfsClockConfigure(
+ hRmDevice, pMaxKHz, pDfsKHz);
+ else if (hRmDevice->ChipId.Id == 0x20)
+ return NvRmPrivAp20DfsClockConfigure(
+ hRmDevice, pMaxKHz, pDfsKHz);
+ else
+ NV_ASSERT(!"Unsupported chip ID");
+ break;
+
+ case ExecPlatform_Fpga:
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ // Set fixed FPGA frequency (default: AP15 FPGA)
+ if (hRmDevice->ChipId.Id == 0x20)
+ pDfsKHz->Domains[i] = AP20_FPGA_FREQ;
+ else
+ pDfsKHz->Domains[i] = AP15_FPGA_FREQ;
+ }
+ break;
+ default:
+ NV_ASSERT(!"Not supported execution platform for DFS");
+ }
+ return NV_TRUE; // configuration completed
+}
+
+static void
+DfsClipCpuEmcHighLimits(
+ NvRmDeviceHandle hRmDevice,
+ NvRmFreqKHz* pCpuHighKHz,
+ NvRmFreqKHz* pEmcHighKHz)
+{
+ if ((hRmDevice->ChipId.Id == 0x15) || (hRmDevice->ChipId.Id == 0x16))
+ NvRmPrivAp15ClipCpuEmcHighLimits(hRmDevice, pCpuHighKHz, pEmcHighKHz);
+ else if (hRmDevice->ChipId.Id == 0x20)
+ NvRmPrivAp20ClipCpuEmcHighLimits(hRmDevice, pCpuHighKHz, pEmcHighKHz);
+ else
+ NV_ASSERT(!"Unsupported chip ID");
+}
+
+/*****************************************************************************/
+
+static void
+DttGetTcorePolicy(
+ NvS32 TemperatureC,
+ const NvRmDfs* pDfs,
+ NvS32* pLowLimit,
+ NvS32* pHighLimit,
+ NvU32* pPollMs)
+{
+ if (pDfs->hRm->ChipId.Id == 0x20)
+ {
+ NvRmPrivAp20DttGetTcorePolicy(TemperatureC, &pDfs->ThermalThrottler,
+ pLowLimit, pHighLimit, pPollMs);
+ NV_ASSERT(*pLowLimit != ODM_TMON_PARAMETER_UNSPECIFIED);
+ NV_ASSERT(*pHighLimit != ODM_TMON_PARAMETER_UNSPECIFIED);
+ return;
+ }
+ // Default 1-second polling policy
+ *pLowLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
+ *pHighLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
+ *pPollMs = 1000;
+}
+
/*
 * Thermal throttling update, called from the clock control thread while
 * the shared PLL is locked.
 *
 * Reads the core temperature (when an update is pending), lazily
 * registers the TMON out-of-limit interrupt if chip policy supports it,
 * refreshes the monitoring policy, and finally lets the chip layer
 * throttle the requested frequencies based on the recorded temperature.
 *
 * Returns NV_TRUE when another update pass is required (chip layer
 * decision), NV_FALSE when throttling is disabled/unsupported.
 */
static NvBool
DttClockUpdate(
    const NvRmDfs* pDfs,
    NvRmDtt* pDtt,
    NvRmDfsFrequencies* pDfsKHz)
{
    NvU32 msec;
    NvS32 TemperatureC;
    NvS32 LowLimit, HighLimit;

    // Check if thermal throttling is supported
    if (NVRM_DTT_DISABLED || (!pDtt->hOdmTcore))
        return NV_FALSE;

    // Update temperature
    if (pDtt->UpdateFlag &&
        NvOdmTmonTemperatureGet(pDtt->hOdmTcore, &TemperatureC))
    {
        // Register TMON interrupt, if it is supported by device, and chip
        // policy, but has not been registered yet. Set initial temperature
        // limits according to chip specific policy.
        if (pDtt->UseIntr && !pDtt->hOdmTcoreIntr)
        {
            DttGetTcorePolicy(TemperatureC, pDfs, &LowLimit, &HighLimit, &msec);
            if ((LowLimit != ODM_TMON_PARAMETER_UNSPECIFIED) &&
                (HighLimit != ODM_TMON_PARAMETER_UNSPECIFIED))
            {
                // Only register the callback once both limits programmed
                if (NvOdmTmonParameterConfig(pDtt->hOdmTcore,
                        NvOdmTmonConfigParam_IntrLimitLow, &LowLimit) &&
                    NvOdmTmonParameterConfig(pDtt->hOdmTcore,
                        NvOdmTmonConfigParam_IntrLimitHigh, &HighLimit))
                {
                    pDtt->hOdmTcoreIntr = NvOdmTmonIntrRegister(
                        pDtt->hOdmTcore, DttIntrCallback, (void*)pDfs);
                }
            }
            if (!pDtt->hOdmTcoreIntr)
                pDtt->UseIntr = NV_FALSE; // registration failed - use polling
        }

        // Update temperature monitoring policy
        DttGetTcorePolicy(TemperatureC, pDfs, &LowLimit, &HighLimit, &msec);
        NvOsIntrMutexLock(pDfs->hIntrMutex);
        pDtt->CoreTemperatureC = TemperatureC;
        pDtt->RdIntervalUs = msec * 1000;
        // Keep UpdateFlag set while DFS is stopped so the next pass
        // re-reads the temperature
        if (pDfs->DfsRunState > NvRmDfsRunState_Stopped)
        {
            pDtt->UpdateFlag = NV_FALSE;
        }
        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
    }

    // Throttle clock frequencies, if necessary
    if (pDfs->hRm->ChipId.Id == 0x20)
        return NvRmPrivAp20DttClockUpdate(
            pDfs->hRm, pDtt->CoreTemperatureC, &pDfs->CurrentKHz, pDfsKHz);
    else
        return NV_FALSE; // No throttling policy for this chip ID
}
+
/*
 * TMON out-of-limit interrupt callback: re-centers the interrupt limits
 * "around" the current temperature (which clears the interrupt
 * condition) and flags the throttler for an update on the next clock
 * control pass.
 *
 * args - NvRmDfs* passed at TMON interrupt registration time.
 */
static void DttIntrCallback(void* args)
{
    NvU32 msec;
    NvS32 TemperatureC = 0;
    NvS32 LowLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
    NvS32 HighLimit = ODM_TMON_PARAMETER_UNSPECIFIED;
    NvRmDfs* pDfs = (NvRmDfs*)args;
    NvRmDtt* pDtt = &pDfs->ThermalThrottler;

    if (NvOdmTmonTemperatureGet(pDtt->hOdmTcore, &TemperatureC))
    {
        DttGetTcorePolicy(TemperatureC, pDfs, &LowLimit, &HighLimit, &msec);
        NV_ASSERT(LowLimit != ODM_TMON_PARAMETER_UNSPECIFIED);
        NV_ASSERT(HighLimit != ODM_TMON_PARAMETER_UNSPECIFIED);

        // Clear interrupt condition by setting new limits "around" temperature
        (void)NvOdmTmonParameterConfig(pDtt->hOdmTcore,
            NvOdmTmonConfigParam_IntrLimitLow, &LowLimit);
        (void)NvOdmTmonParameterConfig(pDtt->hOdmTcore,
            NvOdmTmonConfigParam_IntrLimitHigh, &HighLimit);
    }
    // Request a throttling update from the clock control thread
    NvOsIntrMutexLock(pDfs->hIntrMutex);
    pDtt->UpdateFlag = NV_TRUE;
    NvOsIntrMutexUnlock(pDfs->hIntrMutex);

    NVRM_DFS_PRINTF(("Dtt Intr: T = %d, LowLimit = %d, HighLimit = %d\n",
                     TemperatureC, LowLimit, HighLimit));
}
+
+/*****************************************************************************/
+// DFS PRIVATE INTERFACES
+/*****************************************************************************/
+
/*
 * Initialize the DFS subsystem on the given RM device: allocates
 * synchronization objects, determines the initial run state per
 * platform/chip, initializes samplers and h/w monitors, and registers
 * the System Statistic Monitor interrupt that drives the DFS algorithm.
 *
 * Returns NvSuccess on success (including the DFS-disabled case, where
 * initialization is skipped); on failure releases everything acquired
 * so far via NvRmPrivDfsDeinit and returns the error.
 */
NvError NvRmPrivDfsInit(NvRmDeviceHandle hRmDeviceHandle)
{
    NvError error;
    NvRmDfsFrequencies DfsKHz;
    NvRmDfs* pDfs = &s_Dfs;

    NV_ASSERT(hRmDeviceHandle);
    DfsHintsPrintInit();

    NvOsMemset(pDfs, 0, sizeof(NvRmDfs));
    pDfs->hRm = hRmDeviceHandle;
    s_Platform = NvRmPrivGetExecPlatform(hRmDeviceHandle);
    s_DfsLogOn = NV_FALSE;

    /*
     * Set DFS IRQ invalid to avoid accidental deregistration of somebody's
     * else IRQ in case of DFS initialization error. Clear DFS clock control
     * execution thread state variables
     */
    pDfs->IrqNumber = NVRM_IRQ_INVALID;
    pDfs->InitializedThread = NV_FALSE;
    pDfs->AbortThread = NV_FALSE;
    pDfs->BusySyncState = NvRmDfsBusySyncState_Idle;
    pDfs->PmRequest = NvRmPmRequest_None;

    // DFS interrupt handler mutex
    error = NvOsIntrMutexCreate(&pDfs->hIntrMutex);
    if (error != NvSuccess)
    {
        goto failed;
    }

    // DFS algorithm parameters and clock limits
    DfsParametersInit(pDfs);

    /*
     * DFS is always disabled in QT and Sim execution environments,
     * when DFS testing is disabled. The initial DFS state for AP15 SoC and
     * FPGA is specified by the respective macros.
     */
    pDfs->DfsRunState = NvRmDfsRunState_Disabled;
    switch (s_Platform)
    {
        case ExecPlatform_Soc:
            if ((pDfs->hRm->ChipId.Id == 0x15) || (pDfs->hRm->ChipId.Id == 0x16))
                pDfs->DfsRunState = NVRM_AP15_SOC_INITIAL_DFS_STATE;
            else if (pDfs->hRm->ChipId.Id == 0x20)
                pDfs->DfsRunState = NVRM_AP20_SOC_INITIAL_DFS_STATE;
            break;
        case ExecPlatform_Fpga:
            pDfs->DfsRunState = NVRM_FPGA_INITIAL_DFS_STATE;
            break;
        default:
            break;
    }
    pDfs->DfsLPxSavedState = pDfs->DfsRunState;
    if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
    {
        // If DFS disabled abort initialization and exit
        return NvSuccess;
    }

    // DFS signaling semaphore
    error = NvOsSemaphoreCreate(&pDfs->hSemaphore, 0);
    if (error != NvSuccess)
    {
        goto failed;
    }
    // Register DFS as power client and obtain client id
    error = NvRmPowerRegister(hRmDeviceHandle, pDfs->hSemaphore, &pDfs->PowerClientId);
    if (error != NvSuccess)
    {
        goto failed;
    }

    // DFS busy hints synchronization objects
    error = NvOsMutexCreate(&pDfs->hSyncBusyMutex);
    if (error != NvSuccess)
    {
        goto failed;
    }
    error = NvOsSemaphoreCreate(&pDfs->hSyncBusySemaphore, 0);
    if (error != NvSuccess)
    {
        goto failed;
    }

    /*
     * Get DFS modules capabilities, check which activity monitors are
     * supported, and initialize monitor access function pointers. Then
     * initialize DFS samples and H/w monitors
     */
    error = DfsGetModulesCapabilities(pDfs);
    if (error != NvSuccess)
    {
        goto failed;
    }
    DfsClockFreqGet(hRmDeviceHandle, &DfsKHz);
    DfsSamplersInit(&DfsKHz, pDfs);
    error = DfsHwInit(pDfs);
    if (error != NvSuccess)
    {
        goto failed;
    }

    /*
     * Configure System Statistic module interrupt, which will be used to
     * trigger DFS algorithm execution
     */
    {
        pDfs->IrqNumber = NvRmGetIrqForLogicalInterrupt(hRmDeviceHandle,
            NVRM_MODULE_ID(NvRmModuleID_SysStatMonitor, 0),
            0);
    }
    if (!pDfs->DfsInterruptHandle)
    {
        NvU32 IrqList = (NvU32)pDfs->IrqNumber;
        NvOsInterruptHandler hDfsIsr = DfsIsr;
        error = NvRmInterruptRegister(hRmDeviceHandle, 1,
            &IrqList, &hDfsIsr, pDfs, &pDfs->DfsInterruptHandle, NV_TRUE);
        if (error != NvSuccess)
        {
            // Set IRQ invalid to avoid deregistration of other module interrupt
            pDfs->IrqNumber = NVRM_IRQ_INVALID;
            goto failed;
        }
    }

    /*
     * Provided DFS is initialized in running state, start sampling for the
     * next sampling interval based on current DFS domain frequencies and
     * enable DFS interrupt
     */
    if (pDfs->DfsRunState > NvRmDfsRunState_Stopped)
    {
        DfsStartMonitors(
            pDfs, &pDfs->CurrentKHz, pDfs->SamplingWindow.NextIntervalMs);
    }
    return NvSuccess;

failed:
    NvRmPrivDfsDeinit(hRmDeviceHandle);
    return error;
}
+
/*
 * Release all DFS resources in reverse order of acquisition:
 * deregister the interrupt first (so no ISR can fire), stop the clock
 * control thread, deinit h/w, then destroy synchronization objects and
 * clear the DFS structure. Also used as the failure path of
 * NvRmPrivDfsInit, so every call below must tolerate partially
 * initialized state.
 */
void NvRmPrivDfsDeinit(NvRmDeviceHandle hRmDeviceHandle)
{
    NvRmDfs* pDfs = &s_Dfs;
    NV_ASSERT(hRmDeviceHandle);

    // Release all DFS resources
    NvRmInterruptUnregister(hRmDeviceHandle, pDfs->DfsInterruptHandle);
    pDfs->DfsInterruptHandle = NULL;
    DfsThreadTerminate(pDfs);
    DfsHwDeinit(pDfs);
    NvOsSemaphoreDestroy(pDfs->hSyncBusySemaphore);
    NvOsMutexDestroy(pDfs->hSyncBusyMutex);
    NvRmPowerUnRegister(hRmDeviceHandle, pDfs->PowerClientId);
    NvOsSemaphoreDestroy(pDfs->hSemaphore);
    NvOsIntrMutexDestroy(pDfs->hIntrMutex);
    NvOsMemset(pDfs, 0, sizeof(NvRmDfs));
}
+
+NvRmFreqKHz NvRmPrivDfsGetMaxKHz(NvRmDfsClockId ClockId)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+ return pDfs->DfsParameters[ClockId].MaxKHz;
+}
+
+NvRmFreqKHz NvRmPrivDfsGetMinKHz(NvRmDfsClockId ClockId)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+ return pDfs->DfsParameters[ClockId].MinKHz;
+}
+
+void NvRmPrivDfsSignal(NvRmDfsBusyHintSyncMode Mode)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+
+ // Just signal clock control thread for asynchronous busy hint or if the
+ // thread has not been created (no DFS execution at all)
+ if (!((Mode == NvRmDfsBusyHintSyncMode_Sync) && pDfs->InitializedThread))
+ {
+ NvOsSemaphoreSignal(pDfs->hSemaphore);
+ return;
+ }
+
+ // Signal clock control thread and wait for clock update before return
+ // to caller for synchronous busy hint
+ NvOsMutexLock(pDfs->hSyncBusyMutex);
+
+ pDfs->BusySyncState = NvRmDfsBusySyncState_Signal;
+ NvOsSemaphoreSignal(pDfs->hSemaphore);
+
+#if !DFS_SYNC_BUSY_TIMEOUT_MS
+ NvOsSemaphoreWait(pDfs->hSyncBusySemaphore);
+#else
+ if(NvError_Timeout == NvOsSemaphoreWaitTimeout(
+ pDfs->hSyncBusySemaphore, DFS_SYNC_BUSY_TIMEOUT_MS))
+ {
+ NvOsDebugPrintf("Syncronous busy hint timeout detected");
+ NV_ASSERT(0);
+ }
+#endif
+ NvOsMutexUnlock(pDfs->hSyncBusyMutex);
+}
+
+void NvRmPrivDfsResync(void)
+{
+ NvRmDfsFrequencies DfsKHz;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ DfsClockFreqGet(pDfs->hRm, &DfsKHz);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ pDfs->CurrentKHz = DfsKHz;
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+}
+
+NvRmPmRequest NvRmPrivPmThread(void)
+{
+ return DfsThread(&s_Dfs);
+}
+
/*
 * Debug reporting for starvation hints: optionally prints each hint to
 * the debug console (DFS_HINTS_PRINTF) and/or records it in the DFS log
 * buffers (DFS_LOGGING_SECONDS). Compiles to a no-op when both build
 * switches are off.
 *
 * ClientId/ClientTag identify the hint originator; pMultiHint is an
 * array of NumHints starvation hints.
 */
void NvRmPrivStarvationHintPrintf(
    NvU32 ClientId,
    NvU32 ClientTag,
    const NvRmDfsStarvationHint* pMultiHint,
    NvU32 NumHints)
{
#if DFS_HINTS_PRINTF
    {
        NvU32 i;
        // Tag is a packed character code; +1 for NUL terminator
        char ClientName[sizeof(ClientTag)+ 1];
        ClientTagToString(ClientTag, ClientName);

        for (i = 0; i < NumHints; i++)
        {
            const NvRmDfsStarvationHint* pHint = &pMultiHint[i];
            NvOsDebugPrintf("%s starvation hint: %s from client %3d (%s)\n",
                            s_DfsDomainNames[pHint->ClockId],
                            (pHint->Starving ? "TRUE " : "FALSE"),
                            ClientId, ClientName);
        }
    }
#endif
#if DFS_LOGGING_SECONDS
    {
        NvU32 i, SampleIndex;
        NvRmDfs* pDfs = &s_Dfs;

        // Log buffers and indices are shared with the ISR - take the mutex;
        // hints that would overflow the log are silently dropped
        NvOsIntrMutexLock(pDfs->hIntrMutex);
        if (s_DfsLogOn &&
            ((s_DfsLogStarvationWrIndex + NumHints) < DFS_LOG_SIZE))
        {
            SampleIndex = s_DfsLogWrIndex;
            for (i = 0; i < NumHints; i++)
            {
                DfsLogStarvationHint* pEntry =
                    &s_DfsLogStarvation[s_DfsLogStarvationWrIndex++];
                pEntry->LogSampleIndex = SampleIndex;
                pEntry->ClientId = ClientId;
                pEntry->ClientTag = ClientTag;
                pEntry->StarvationHint = pMultiHint[i];
            }
        }
        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
    }
#endif
}
+
/*
 * Debug reporting for busy hints: optionally prints each hint to the
 * debug console (DFS_HINTS_PRINTF) and/or records it in the DFS log
 * buffers (DFS_LOGGING_SECONDS). NvRmFreqMaximum boost requests are
 * resolved to the domain maximum before printing/logging. Compiles to a
 * no-op when both build switches are off.
 */
void NvRmPrivBusyHintPrintf(
    NvU32 ClientId,
    NvU32 ClientTag,
    const NvRmDfsBusyHint* pMultiHint,
    NvU32 NumHints)
{
#if DFS_HINTS_PRINTF
    {
        NvU32 i;
        // Tag is a packed character code; +1 for NUL terminator
        char ClientName[sizeof(ClientTag)+ 1];
        ClientTagToString(ClientTag, ClientName);

        for (i = 0; i < NumHints; i++)
        {
            const NvRmDfsBusyHint* pHint = &pMultiHint[i];
            NvRmFreqKHz BoostKHz = (pHint->BoostKHz == NvRmFreqMaximum) ?
                NvRmPrivDfsGetMaxKHz(pHint->ClockId) : pHint->BoostKHz;
            NvOsDebugPrintf("%s busy hint: %6dkHz %4dms from client %3d (%s)\n",
                            s_DfsDomainNames[pHint->ClockId], BoostKHz,
                            pHint->BoostDurationMs, ClientId, ClientName);
        }
    }
#endif
#if DFS_LOGGING_SECONDS
    {
        NvU32 i, SampleIndex;
        NvRmDfs* pDfs = &s_Dfs;

        // Log buffers and indices are shared with the ISR - take the mutex;
        // hints that would overflow the log are silently dropped
        NvOsIntrMutexLock(pDfs->hIntrMutex);
        if (s_DfsLogOn && ((s_DfsLogBusyWrIndex + NumHints) < DFS_LOG_SIZE))
        {
            SampleIndex = s_DfsLogWrIndex;
            for (i = 0; i < NumHints; i++)
            {
                DfsLogBusyHint* pEntry = &s_DfsLogBusy[s_DfsLogBusyWrIndex++];
                pEntry->LogSampleIndex = SampleIndex;
                pEntry->ClientId = ClientId;
                pEntry->ClientTag = ClientTag;
                pEntry->BusyHint = pMultiHint[i];
                if (pEntry->BusyHint.BoostKHz == NvRmFreqMaximum)
                    pEntry->BusyHint.BoostKHz =
                        NvRmPrivDfsGetMaxKHz(pEntry->BusyHint.ClockId);
            }
        }
        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
    }
#endif
}
+
+/*****************************************************************************/
+// DVS PRIVATE INTERFACES
+/*****************************************************************************/
+
/*
 * Step the core (and RTC) rail from the current level to TargetMv.
 *
 * The transition is performed in NVRM_SAFE_VOLTAGE_STEP_MV increments
 * so that core and RTC voltages stay in sync; the update order differs
 * by direction so that core voltage stays at or above RTC voltage
 * throughout the transition. SVOP control bits are toggled when the
 * voltage crosses LowSvopThresholdMv: cleared after crossing upward,
 * set before crossing downward.
 */
static void
DvsChangeCoreVoltage(
    NvRmDeviceHandle hRm,
    NvRmDvs* pDvs,
    NvRmMilliVolts TargetMv)
{
    NvBool WasLow;
    NvRmMilliVolts CurrentMv = pDvs->CurrentCoreMv;

    NV_ASSERT(TargetMv >= pDvs->MinCoreMv);
    NV_ASSERT(TargetMv <= pDvs->NominalCoreMv);

    // Go from current to target voltage in safe steps keeping core and
    // rtc voltages in synch (core voltage above rtc during transition)
    while (CurrentMv != TargetMv)
    {
        WasLow = (CurrentMv < pDvs->LowSvopThresholdMv);

        if (CurrentMv < TargetMv)
        {
            // Going up: raise core first, then RTC (if on a separate rail)
            CurrentMv += NVRM_SAFE_VOLTAGE_STEP_MV;
            if (CurrentMv > TargetMv)
                CurrentMv = TargetMv;
            NvRmPmuSetVoltage(hRm, pDvs->CoreRailAddress, CurrentMv, NULL);
            if (pDvs->CoreRailAddress != pDvs->RtcRailAddress)
                NvRmPmuSetVoltage(hRm, pDvs->RtcRailAddress, CurrentMv, NULL);
            if (WasLow && (CurrentMv >= pDvs->LowSvopThresholdMv))
            {
                // Clear SVOP bits after crossing SVOP threshold up
                NvRmPrivAp15SetSvopControls(hRm, pDvs->HighSvopSettings);
            }
        }
        else
        {
            // Going down: lower RTC first, then core (if on a separate rail)
            CurrentMv -= NVRM_SAFE_VOLTAGE_STEP_MV;
            if (CurrentMv < TargetMv)
                CurrentMv = TargetMv;
            if (!WasLow && (CurrentMv < pDvs->LowSvopThresholdMv))
            {   // Set SVOP bits before crossing SVOP threshold down
                NvRmPrivAp15SetSvopControls(hRm, pDvs->LowSvopSettings);
            }
            NvRmPmuSetVoltage(hRm, pDvs->RtcRailAddress, CurrentMv, NULL);
            if (pDvs->CoreRailAddress != pDvs->RtcRailAddress)
                NvRmPmuSetVoltage(hRm, pDvs->CoreRailAddress, CurrentMv, NULL);
        }
    }
    pDvs->CurrentCoreMv = TargetMv;
}
+
+static void
+DvsChangeCpuVoltage(
+ NvRmDeviceHandle hRm,
+ NvRmDvs* pDvs,
+ NvRmMilliVolts TargetMv)
+{
+ NV_ASSERT(TargetMv >= pDvs->MinCpuMv);
+ NV_ASSERT(TargetMv <= pDvs->NominalCpuMv);
+
+ if (pDvs->CurrentCpuMv != TargetMv)
+ {
+ NvRmPmuSetVoltage(hRm, pDvs->CpuRailAddress, TargetMv, NULL);
+ pDvs->CurrentCpuMv = TargetMv;
+ }
+}
+
+// One-time DVS initialization: discovers the RTC/Core (and, when present,
+// dedicated CPU) power rails from ODM peripheral data, validates PMU rail
+// capabilities against the voltage range and step resolution DVFS needs,
+// reads boot voltages, and - unless DFS is disabled - raises the rails to
+// nominal and seeds the DVS corner state. Returns early (leaving rail
+// addresses zero) on systems without rail control, which disables all
+// subsequent scaling entry points.
+void NvRmPrivDvsInit(void)
+{
+ NvRmPmuVddRailCapabilities cap;
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+ NvOdmPmuProperty PmuProperty = {0};
+
+ const NvOdmPeripheralConnectivity* pRtcRail =
+ NvOdmPeripheralGetGuid(NV_VDD_RTC_ODM_ID);
+ const NvOdmPeripheralConnectivity* pCoreRail =
+ NvOdmPeripheralGetGuid(NV_VDD_CORE_ODM_ID);
+
+ /* Some systems (e.g. FPGA) do not have power rail control. */
+ if (!pRtcRail || !pCoreRail)
+ return;
+
+ pDvs->NominalCoreMv = NvRmPrivGetNominalMV(pDfs->hRm);
+ pDvs->MinCoreMv = NvRmPrivSourceVscaleGetMV(pDfs->hRm, 0);
+ pDvs->LowCornerCoreMv = pDvs->MinCoreMv;
+ NvRmPrivGetSvopParameters(pDfs->hRm, &pDvs->LowSvopThresholdMv,
+ &pDvs->LowSvopSettings, &pDvs->HighSvopSettings);
+ pDvs->UpdateFlag = NV_FALSE;
+ pDvs->StopFlag = NV_FALSE;
+ pDvs->Lp2SyncOTPFlag = NV_FALSE;
+
+ // Get RTC rail address, check range and resolution
+ NV_ASSERT(pRtcRail && pRtcRail->NumAddress);
+ pDvs->RtcRailAddress = pRtcRail->AddressList[0].Address;
+ NvRmPmuGetCapabilities(pDfs->hRm, pDvs->RtcRailAddress, &cap);
+ NV_ASSERT((cap.StepMilliVolts) &&
+ (cap.StepMilliVolts <= NVRM_SAFE_VOLTAGE_STEP_MV));
+ NV_ASSERT(cap.MinMilliVolts <= pDvs->MinCoreMv);
+ NV_ASSERT(cap.MaxMilliVolts >= pDvs->NominalCoreMv);
+
+ // Get Core rail address, check range and resolution
+ // (core additionally requires NVRM_CORE_RESOLUTION_MV granularity)
+ NV_ASSERT(pCoreRail && pCoreRail->NumAddress);
+ pDvs->CoreRailAddress = pCoreRail->AddressList[0].Address;
+ NvRmPmuGetCapabilities(pDfs->hRm, pDvs->CoreRailAddress, &cap);
+ NV_ASSERT((cap.StepMilliVolts) &&
+ (cap.StepMilliVolts <= NVRM_SAFE_VOLTAGE_STEP_MV));
+ NV_ASSERT((cap.StepMilliVolts) &&
+ (cap.StepMilliVolts <= NVRM_CORE_RESOLUTION_MV));
+ NV_ASSERT(cap.MinMilliVolts <= pDvs->MinCoreMv);
+ NV_ASSERT(cap.MaxMilliVolts >= pDvs->NominalCoreMv);
+
+ if (NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ {
+ // Get dedicated CPU rail address, check range and resolution
+ const NvOdmPeripheralConnectivity* pCpuRail =
+ NvOdmPeripheralGetGuid(NV_VDD_CPU_ODM_ID);
+
+ pDvs->NominalCpuMv = NvRmPrivModuleVscaleGetMV(
+ pDfs->hRm, NvRmModuleID_Cpu, NvRmFreqMaximum);
+ pDvs->MinCpuMv = NvRmPrivModuleVscaleGetMV(pDfs->hRm, NvRmModuleID_Cpu,
+ NvRmPrivGetSocClockLimits(NvRmModuleID_Cpu)->MinKHz);
+ pDvs->LowCornerCpuMv = pDvs->MinCpuMv;
+
+ NV_ASSERT(pCpuRail && pCpuRail->NumAddress);
+ pDvs->CpuRailAddress = pCpuRail->AddressList[0].Address;
+ NvRmPmuGetCapabilities(pDfs->hRm, pDvs->CpuRailAddress, &cap);
+ NV_ASSERT((cap.StepMilliVolts) &&
+ (cap.StepMilliVolts <= NVRM_SAFE_VOLTAGE_STEP_MV));
+ NV_ASSERT((cap.StepMilliVolts) &&
+ (cap.StepMilliVolts <= NVRM_CORE_RESOLUTION_MV));
+ NV_ASSERT(cap.MinMilliVolts <= pDvs->MinCpuMv);
+ NV_ASSERT(cap.MaxMilliVolts >= pDvs->NominalCpuMv);
+ // NOTE(review): presumably the PMU OTP/default request level for the
+ // CPU rail - used later to resync DVFS state after LP2; confirm
+ // against NvRmPmuVddRailCapabilities definition.
+ pDvs->CpuOTPMv = cap.requestMilliVolts;
+
+ // CPU rail behaviour after CPU request signal On-Off-On transition
+ if (NvOdmQueryGetPmuProperty(&PmuProperty))
+ pDvs->VCpuOTPOnWakeup = PmuProperty.VCpuOTPOnWakeup;
+
+ // Get dedicated CPU rail boot voltage
+ NvRmPmuGetVoltage(pDfs->hRm, pDvs->CpuRailAddress, &pDvs->CurrentCpuMv);
+ }
+
+ // Get boot core voltage. Check if DFS is disabled - no voltage scaling
+ // in this case. Otherwise, set nominal core and dedicated cpu voltages.
+ // Initialize DVS corner variables.
+ NvRmPmuGetVoltage(pDfs->hRm, pDvs->CoreRailAddress, &pDvs->CurrentCoreMv);
+ if ((pDfs->DfsRunState <= NvRmDfsRunState_Disabled))
+ {
+ // Zeroed rail addresses act as the "no scaling" sentinel checked by
+ // every other DVS entry point
+ pDvs->RtcRailAddress = pDvs->CoreRailAddress = 0;
+ return;
+ }
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, pDvs->NominalCoreMv);
+
+ if (NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ {
+ DvsChangeCpuVoltage(pDfs->hRm, pDvs, pDvs->NominalCpuMv);
+ pDvs->DvsCorner.CpuMv = pDvs->NominalCpuMv;
+
+ // No core scaling if CPU voltage is not preserved across LPx
+ if (pDvs->VCpuOTPOnWakeup)
+ pDvs->MinCoreMv = pDvs->NominalCoreMv;
+ }
+ else
+ {
+ pDvs->DvsCorner.CpuMv = pDvs->NominalCoreMv;
+ }
+ pDvs->DvsCorner.SystemMv = pDvs->NominalCoreMv;
+ pDvs->DvsCorner.EmcMv = pDvs->NominalCoreMv;
+ pDvs->DvsCorner.ModulesMv = pDvs->NominalCoreMv;
+
+ // Chip-specific low-corner floors (AP15/AP16 vs AP20)
+ if ((pDfs->hRm->ChipId.Id == 0x15) || (pDfs->hRm->ChipId.Id == 0x16))
+ pDvs->LowCornerCoreMv = NV_MAX(NVRM_AP15_LOW_CORE_MV, pDvs->MinCoreMv);
+ else if (pDfs->hRm->ChipId.Id == 0x20)
+ {
+ pDvs->LowCornerCoreMv = NV_MAX(NVRM_AP20_LOW_CORE_MV, pDvs->MinCoreMv);
+ pDvs->LowCornerCpuMv = NV_MAX(NVRM_AP20_LOW_CPU_MV, pDvs->MinCpuMv);
+ }
+}
+
+// Applies new DVS voltage thresholds around a frequency change. Called
+// twice per transition: with BeforeFreqChange = NV_TRUE before clocks are
+// reprogrammed (voltage may only go up) and NV_FALSE afterwards (voltage
+// may only go down). The ordering of core vs CPU rail updates below is
+// deliberate to keep the required core-over-CPU voltage margin.
+void NvRmPrivVoltageScale(
+ NvBool BeforeFreqChange,
+ NvRmMilliVolts CpuMv,
+ NvRmMilliVolts SystemMv,
+ NvRmMilliVolts EmcMv)
+{
+ NvRmMilliVolts TargetMv;
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+ NvBool DedicatedCpuRail = NvRmPrivIsCpuRailDedicated(pDfs->hRm);
+
+ /* Some systems (e.g. FPGA) do not have power rail control. */
+ if (!pDvs->RtcRailAddress || !pDvs->CoreRailAddress)
+ return;
+
+ // Record new DVS threshold and determine new target voltage as maximum of
+ // all thresholds (CPU threshold participates only on a shared rail)
+ pDvs->DvsCorner.CpuMv = CpuMv;
+ pDvs->DvsCorner.SystemMv = SystemMv;
+ pDvs->DvsCorner.EmcMv = EmcMv;
+ pDvs->DvsCorner.ModulesMv = NvRmPrivModulesGetOperationalMV(pDfs->hRm);
+ TargetMv = pDvs->DvsCorner.ModulesMv;
+ if (!DedicatedCpuRail && (TargetMv < CpuMv))
+ TargetMv = CpuMv;
+ if (TargetMv < SystemMv)
+ TargetMv = SystemMv;
+ if (TargetMv < EmcMv)
+ TargetMv = EmcMv;
+
+ // Clip new target voltage to core voltage limits
+ if (TargetMv > pDvs->NominalCoreMv)
+ TargetMv = pDvs->NominalCoreMv;
+ else if (TargetMv < pDvs->LowCornerCoreMv)
+ TargetMv = pDvs->LowCornerCoreMv;
+
+ if (DedicatedCpuRail)
+ {
+ // Clip new CPU voltage to CPU voltage limits
+ if (CpuMv > pDvs->NominalCpuMv)
+ CpuMv = pDvs->NominalCpuMv;
+ else if (CpuMv < pDvs->LowCornerCpuMv)
+ CpuMv = pDvs->LowCornerCpuMv;
+
+ // Increase voltage before changing frequency, and vice versa;
+ // Change core 1st before changing frequency, and vice versa
+ // (to guarantee required margin of core voltage over CPU voltage)
+ if (BeforeFreqChange)
+ {
+ if (pDvs->Lp2SyncOTPFlag)
+ {
+ // If required, synchronize DVFS state with CPU rail default
+ // level after LP2 exit
+ pDvs->Lp2SyncOTPFlag = NV_FALSE;
+ pDvs->CurrentCpuMv = pDvs->CpuOTPMv;
+ }
+ if (pDvs->CurrentCoreMv < TargetMv)
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, TargetMv);
+ if (pDvs->CurrentCpuMv < CpuMv)
+ DvsChangeCpuVoltage(pDfs->hRm, pDvs, CpuMv);
+ }
+ else
+ {
+ // After the frequency change: lower CPU first, then core
+ if (pDvs->CurrentCpuMv > CpuMv)
+ DvsChangeCpuVoltage(pDfs->hRm, pDvs, CpuMv);
+ if (pDvs->CurrentCoreMv > TargetMv)
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, TargetMv);
+ }
+ }
+ else
+ {
+ // Increase voltage before changing frequency, and vice versa
+ if ((BeforeFreqChange && (pDvs->CurrentCoreMv < TargetMv)) ||
+ (!BeforeFreqChange && (pDvs->CurrentCoreMv > TargetMv)))
+ {
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, TargetMv);
+ }
+ }
+}
+
+// Handles an asynchronous core-voltage request. Increases are applied
+// immediately (safe at current frequencies); decreases are deferred via
+// UpdateFlag so the DFS thread can first re-check the operational voltage
+// needed by all modules.
+void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+
+ // Do nothing for unspecified target.
+ if (TargetMv == NvRmVoltsUnspecified)
+ return;
+
+ /* Some systems (e.g. FPGA) do not have power rail control. */
+ if (!pDvs->RtcRailAddress || !pDvs->CoreRailAddress)
+ return;
+
+ // Clip new target voltage to core voltage limits
+ if (TargetMv > pDvs->NominalCoreMv)
+ TargetMv = pDvs->NominalCoreMv;
+ else if (TargetMv < pDvs->LowCornerCoreMv)
+ TargetMv = pDvs->LowCornerCoreMv;
+
+ // If new target voltage is above current - update immediately. If target
+ // is below current voltage - just set update flag, so that next DFS ISR
+ // signals DFS thread, which checks operational voltage for all modules.
+ if (TargetMv > pDvs->CurrentCoreMv)
+ {
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, TargetMv);
+ }
+ else if (TargetMv < pDvs->CurrentCoreMv)
+ {
+ pDvs->UpdateFlag = NV_TRUE;
+ }
+}
+
+// Raises core (and dedicated CPU, when present) rails to nominal voltage.
+// Used on the way into deep sleep so the system parks at a safe level;
+// the caller is responsible for setting StopFlag to freeze DVFS there.
+static void NvRmPrivDvsStopAtNominal(void)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+
+ /* Some systems (e.g. FPGA) do not have power rail control. */
+ if (!pDvs->RtcRailAddress || !pDvs->CoreRailAddress)
+ return;
+
+ // Set nominal voltage
+ DvsChangeCoreVoltage(pDfs->hRm, pDvs, pDvs->NominalCoreMv);
+ if(NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ DvsChangeCpuVoltage(pDfs->hRm, pDvs, pDvs->NominalCpuMv);
+}
+
+// Re-enables voltage scaling after a suspend/deep-sleep stop by clearing
+// the DVS stop flag set in NvRmPrivDfsSuspend().
+static void NvRmPrivDvsRun(void)
+{
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+ pDvs->StopFlag = NV_FALSE;
+}
+
+// Prepares DFS/DVS for SoC suspend. For LP0 (deep sleep) the voltage is
+// parked at nominal and DVFS stopped; for LP1 (suspend) all DFS clock
+// domains are moved to precomputed suspend frequencies before stopping
+// DVFS. The suspend frequency table is lazily built on first entry, keyed
+// off SuspendKHz.Domains[0] == 0 (invalid) as the "not yet filled" flag.
+void NvRmPrivDfsSuspend(NvOdmSocPowerState state)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvBool UpdateClocks = NV_FALSE;
+ NvRmDfsFrequencies DfsKHz;
+
+ // Fill in target frequencies for suspend state on the 1st entry
+ // (use invalid domain frequency as 1st flag)
+ if (pDfs->SuspendKHz.Domains[0] == 0)
+ {
+ if ((pDfs->hRm->ChipId.Id == 0x15) || (pDfs->hRm->ChipId.Id == 0x16))
+ NvRmPrivAp15DfsVscaleFreqGet(
+ pDfs->hRm, NVRM_AP15_SUSPEND_CORE_MV, &pDfs->SuspendKHz);
+ else if (pDfs->hRm->ChipId.Id == 0x20)
+ pDfs->SuspendKHz = pDfs->LowCornerKHz; // TODO: AP20 suspend corner
+ else
+ pDfs->SuspendKHz = pDfs->LowCornerKHz; // Low corner by default
+ pDfs->SuspendKHz.Domains[0] = NvRmFreqMaximum;
+ }
+
+ NvRmPrivLockSharedPll();
+ if (state == NvOdmSocPowerState_DeepSleep)
+ {
+ // On entry to deep sleep (LP0): set nominal voltage level and
+ // stop DVFS at nominal voltage until resume.
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ pDfs->DfsLPxSavedState = pDfs->DfsRunState;
+ if (pDfs->DfsLPxSavedState > NvRmDfsRunState_Stopped)
+ pDfs->DfsRunState = NvRmDfsRunState_Stopped;
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+
+ NvRmPrivDvsStopAtNominal();
+ pDfs->VoltageScaler.StopFlag = NV_TRUE;
+ }
+ else if (state == NvOdmSocPowerState_Suspend)
+ {
+ // On entry to suspend (LP1): set target frequencies for all DFS
+ // clock domains, stop DFS monitors, and then configure clocks and
+ // core voltage. Stop DVFS in suspend corner until resume.
+ DfsKHz = pDfs->SuspendKHz;
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ pDfs->DfsLPxSavedState = pDfs->DfsRunState;
+ if (pDfs->DfsLPxSavedState > NvRmDfsRunState_Stopped)
+ {
+ pDfs->DfsRunState = NvRmDfsRunState_Stopped;
+ pDfs->TargetKHz = DfsKHz;
+ UpdateClocks = NV_TRUE;
+ }
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+
+ // Retry clock configuration until it is applied in full; DfsKHz is
+ // re-seeded from the suspend corner on every attempt because
+ // DfsClockConfigure may modify it
+ for (; UpdateClocks;)
+ {
+ if (DfsClockConfigure(pDfs->hRm, &pDfs->MaxKHz, &DfsKHz))
+ {
+ pDfs->CurrentKHz = DfsKHz; // DFS is already stopped - no mutex
+ break;
+ }
+ DfsKHz = pDfs->SuspendKHz;
+ }
+ pDfs->VoltageScaler.StopFlag = NV_TRUE;
+ }
+ NvRmPrivUnlockSharedPll();
+}
+
+/*****************************************************************************/
+// DTT PRIVATE INTERFACES
+/*****************************************************************************/
+
+// Initializes dynamic thermal throttling (DTT): opens the core-zone TMON
+// device, validates its interrupt-limit capabilities over the full
+// temperature range, and arms the DTT update path. Bails out (closing the
+// TMON handle) when DFS is disabled, since throttling rides on DFS.
+void NvRmPrivDttInit(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDtt* pDtt = &pDfs->ThermalThrottler;
+
+ // Make sure TMON h/w is initialized
+ pDtt->hOdmTcore = NvOdmTmonDeviceOpen(NvOdmTmonZoneID_Core);
+
+ // No thermal throttling if DFS is disabled, otherwise start DTTS
+ if (pDfs->DfsRunState < NvRmDfsRunState_Stopped)
+ {
+ NvOdmTmonDeviceClose(pDtt->hOdmTcore);
+ pDtt->hOdmTcore = NULL;
+ return;
+ }
+
+ if (!pDtt->hOdmTcore)
+ {
+ // TODO: uncomment after AP20 bring-up
+ if (pDfs->hRm->ChipId.Id == 0x20)
+ {
+ // NV_ASSERT(!"TMON is a must on AP20 platform");
+ }
+ return;
+ }
+ NvOdmTmonCapabilitiesGet(pDtt->hOdmTcore, &pDtt->TcoreCaps);
+ NvOdmTmonParameterCapsGet(pDtt->hOdmTcore,
+ NvOdmTmonConfigParam_IntrLimitLow, &pDtt->TcoreLowLimitCaps);
+ NvOdmTmonParameterCapsGet(pDtt->hOdmTcore,
+ NvOdmTmonConfigParam_IntrLimitHigh, &pDtt->TcoreHighLimitCaps);
+
+ // Interrupt mode is only used when TMON supports it and the ODM does
+ // not lock out limit programming
+ if (pDtt->TcoreCaps.IntrSupported &&
+ !pDtt->TcoreLowLimitCaps.OdmProtected &&
+ !pDtt->TcoreHighLimitCaps.OdmProtected)
+ {
+ // Sanity checks to make sure out-of-limit interrupt is available in
+ // the entire temperature range
+ NV_ASSERT(pDtt->TcoreLowLimitCaps.MinValue <= pDtt->TcoreCaps.Tmin);
+ NV_ASSERT(pDtt->TcoreHighLimitCaps.MinValue <= pDtt->TcoreCaps.Tmin);
+ NV_ASSERT(pDtt->TcoreLowLimitCaps.MaxValue >= pDtt->TcoreCaps.Tmax);
+ NV_ASSERT(pDtt->TcoreHighLimitCaps.MaxValue >= pDtt->TcoreCaps.Tmax);
+#if NVRM_DTT_USE_INTERRUPT
+ pDtt->UseIntr = NV_TRUE;
+#endif
+ }
+#if !NVRM_DTT_DISABLED
+ pDtt->UpdateFlag = NV_TRUE;
+#endif
+}
+
+// Tears down dynamic thermal throttling: unregisters the TMON out-of-limit
+// interrupt handler and closes the core-zone TMON device, clearing both
+// handles in the DTT state so a later re-init starts clean.
+// NOTE(review): assumes the ODM unregister/close calls tolerate NULL
+// handles (DTT may never have been initialized) - confirm against the
+// nvodm_tmon API contract.
+void NvRmPrivDttDeinit(void)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvOdmTmonDeviceHandle hOdmTcore = pDfs->ThermalThrottler.hOdmTcore;
+ NvOdmTmonIntrHandle hOdmTcoreIntr = pDfs->ThermalThrottler.hOdmTcoreIntr;
+
+ NvOdmTmonIntrUnregister(hOdmTcore, hOdmTcoreIntr);
+ pDfs->ThermalThrottler.hOdmTcoreIntr = NULL;
+
+ NvOdmTmonDeviceClose(hOdmTcore);
+ pDfs->ThermalThrottler.hOdmTcore = NULL;
+}
+
+/*****************************************************************************/
+// DFS PUBLIC INTERFACES
+/*****************************************************************************/
+
+// Returns the current DFS run state under the interrupt mutex, or
+// NvRmDfsRunState_Invalid when DFS was never initialized (no mutex).
+NvRmDfsRunState
+NvRmDfsGetState(
+ NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvRmDfsRunState state;
+ NvRmDfs* pDfs = &s_Dfs;
+ NV_ASSERT(hRmDeviceHandle);
+
+ if(!pDfs->hIntrMutex)
+ return NvRmDfsRunState_Invalid;
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ state = pDfs->DfsRunState;
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return state;
+}
+
+// Requests a DFS run-state transition. Transitions out of the Disabled
+// state are never allowed; moving from Stopped into a running (closed/
+// profiled) loop re-seeds the samplers from current h/w frequencies and
+// restarts the monitors. Returns NvError_NotSupported for any other
+// transition. Lock order: shared PLL lock outside the interrupt mutex.
+NvError
+NvRmDfsSetState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsRunState NewDfsRunState)
+{
+ NvRmDfsRunState OldDfsRunState;
+ NvRmDfsFrequencies DfsKHz;
+ NvError error = NvSuccess;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT((0 < NewDfsRunState) && (NewDfsRunState < NvRmDfsRunState_Num));
+
+ NvRmPrivLockSharedPll();
+ DfsClockFreqGet(hRmDeviceHandle, &DfsKHz);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ OldDfsRunState = pDfs->DfsRunState;
+
+ // No transition from disabled state is supported
+ if (OldDfsRunState == NvRmDfsRunState_Disabled)
+ NewDfsRunState = NvRmDfsRunState_Invalid;
+
+ /*
+ * State transition procedures
+ */
+ switch (NewDfsRunState)
+ {
+ // On transition to running states from stopped state samplers are
+ // initialized and restarted; if profiled loop is supported and it
+ // is specified as a new state, profile is initialized as well
+#if DFS_PROFILING
+ case NvRmDfsRunState_ProfiledLoop:
+ DfsProfileInit(pDfs);
+ // fall through
+#endif
+ case NvRmDfsRunState_ClosedLoop:
+ pDfs->DfsRunState = NewDfsRunState;
+ if (OldDfsRunState == NvRmDfsRunState_Stopped)
+ {
+ DfsSamplersInit(&DfsKHz, pDfs);
+ DfsStartMonitors(
+ pDfs, &pDfs->CurrentKHz, pDfs->SamplingWindow.NextIntervalMs);
+ }
+ break;
+
+ // On transition to stopped state just stop DFS targets at whatever
+ // frequency they are now
+ case NvRmDfsRunState_Stopped:
+ pDfs->DfsRunState = NewDfsRunState;
+ break;
+
+ // Not supported transition
+ default:
+ error = NvError_NotSupported;
+ break;
+ }
+ // Keep the LPx-saved state in sync so suspend/resume restores correctly
+ pDfs->DfsLPxSavedState = pDfs->DfsRunState;
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ NvRmPrivUnlockSharedPll();
+ return error;
+}
+
+// Sets new DFS low-corner frequencies for all clock domains. Each value is
+// clipped to [domain MinKHz, current high corner]; NvRmFreqUnspecified
+// leaves that domain untouched. Domains covered by an active CPU or EMC
+// envelope are preserved. Returns NvError_NotSupported when DFS is
+// disabled. The loop starts at index 1 since id 0 is not a valid domain.
+NvError
+NvRmDfsSetLowCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsFreqListCount,
+ const NvRmFreqKHz* pDfsLowFreqList)
+{
+ NvU32 i;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT(DfsFreqListCount == NvRmDfsClockId_Num);
+ NV_ASSERT(pDfsLowFreqList);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to set if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Clip requested low corner frequencies to domain limits and update
+ // DFS low corner (keep corner unchanged if new value is "unspecified")
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ NvRmFreqKHz DomainKHz = pDfsLowFreqList[i];
+ // Preserve CPU or EMC low boundary when the respective envelope is set
+ if ((pDfs->CpuEnvelopeSet && (i == NvRmDfsClockId_Cpu)) ||
+ (pDfs->EmcEnvelopeSet && (i == NvRmDfsClockId_Emc)))
+ {
+ continue;
+ }
+ if (DomainKHz != NvRmFreqUnspecified)
+ {
+ if (DomainKHz < pDfs->DfsParameters[i].MinKHz)
+ {
+ DomainKHz = pDfs->DfsParameters[i].MinKHz;
+ }
+ else if (DomainKHz > pDfs->HighCornerKHz.Domains[i])
+ {
+ DomainKHz = pDfs->HighCornerKHz.Domains[i];
+ }
+ pDfs->LowCornerKHz.Domains[i] = DomainKHz;
+ // Shadow the CPU floor so envelope changes can restore it later
+ if (i == NvRmDfsClockId_Cpu)
+ pDfs->CpuCornersShadow.MinKHz = DomainKHz;
+ }
+ }
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+// Sets new high-corner frequencies for the AV/System, AVP and VDE (v-pipe)
+// DFS domains. Each request is clipped to its domain limits (unspecified
+// values keep the current corner), the System boundary is raised to stay
+// above all AV low boundaries and the AVP/VDE high boundaries, and AHB/APB
+// corners are derived from the System corner capped at their own maximums.
+// Returns NvError_NotSupported when DFS is disabled.
+NvError
+NvRmDfsSetAvHighCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsAvSystemHighKHz,
+ NvRmFreqKHz DfsAvpHighKHz,
+ NvRmFreqKHz DfsVpipeHighKHz)
+{
+ NvU32 i;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to set if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Clip requested VDE high corner frequency to domain limits
+ // (keep corner unchanged if new value is "unspecified")
+ if (DfsVpipeHighKHz == NvRmFreqUnspecified)
+ DfsVpipeHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Vpipe];
+ else if (DfsVpipeHighKHz > pDfs->DfsParameters[NvRmDfsClockId_Vpipe].MaxKHz)
+ DfsVpipeHighKHz = pDfs->DfsParameters[NvRmDfsClockId_Vpipe].MaxKHz;
+ else if (DfsVpipeHighKHz < pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Vpipe])
+ DfsVpipeHighKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Vpipe];
+
+ // Clip requested AVP high corner frequency to domain limits
+ // (keep corner unchanged if new value is "unspecified")
+ if (DfsAvpHighKHz == NvRmFreqUnspecified)
+ DfsAvpHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Avp];
+ else if (DfsAvpHighKHz > pDfs->DfsParameters[NvRmDfsClockId_Avp].MaxKHz)
+ DfsAvpHighKHz = pDfs->DfsParameters[NvRmDfsClockId_Avp].MaxKHz;
+ else if (DfsAvpHighKHz < pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Avp])
+ DfsAvpHighKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Avp];
+
+
+ // Clip requested AVP/System high corner frequency to domain limits
+ // (keep corner unchanged if new value is "unspecified")
+ if (DfsAvSystemHighKHz == NvRmFreqUnspecified)
+ DfsAvSystemHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_System];
+ else if (DfsAvSystemHighKHz > pDfs->DfsParameters[NvRmDfsClockId_System].MaxKHz)
+ DfsAvSystemHighKHz = pDfs->DfsParameters[NvRmDfsClockId_System].MaxKHz;
+ else
+ { // System high boundary must be above all AV low boundaries
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ if ((i != NvRmDfsClockId_Cpu) &&
+ (i != NvRmDfsClockId_Emc))
+ {
+ if ((i == NvRmDfsClockId_Vpipe) &&
+ (!NvRmPrivGetClockSourceHandle(NvRmClockSource_Vbus)))
+ continue; // Skip v-pipe if VDE clock is decoupled from AV
+
+ if (DfsAvSystemHighKHz < pDfs->LowCornerKHz.Domains[i])
+ DfsAvSystemHighKHz = pDfs->LowCornerKHz.Domains[i];
+ }
+ }
+ }
+
+ // Make sure new System and AVP, VDE high boundaries are consistent
+ if ((DfsAvSystemHighKHz < DfsVpipeHighKHz) &&
+ NvRmPrivGetClockSourceHandle(NvRmClockSource_Vbus))
+ {
+ DfsAvSystemHighKHz = DfsVpipeHighKHz;
+ }
+ if (DfsAvSystemHighKHz < DfsAvpHighKHz)
+ {
+ DfsAvSystemHighKHz = DfsAvpHighKHz;
+ }
+
+ // Finally update high corner
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_System] = DfsAvSystemHighKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Ahb] = NV_MIN(
+ DfsAvSystemHighKHz, pDfs->DfsParameters[NvRmDfsClockId_Ahb].MaxKHz);
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Apb] = NV_MIN(
+ DfsAvSystemHighKHz, pDfs->DfsParameters[NvRmDfsClockId_Apb].MaxKHz);
+
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Avp] = DfsAvpHighKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Vpipe] = DfsVpipeHighKHz;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+// Sets new high-corner frequencies for the CPU and EMC DFS domains.
+// Requests are clipped to domain maximums, then to the set of supported
+// EMC configurations (DfsClipCpuEmcHighLimits), then to the domain low
+// corners. No-op (success) if either the CPU or EMC envelope is currently
+// set; NvError_NotSupported when DFS is disabled.
+NvError
+NvRmDfsSetCpuEmcHighCorner(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsCpuHighKHz,
+ NvRmFreqKHz DfsEmcHighKHz)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to set if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Preserve CPU and EMC high corners if either CPU or EMC envelope is set
+ if (pDfs->CpuEnvelopeSet || pDfs->EmcEnvelopeSet)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+ }
+
+ // Keep corner unchanged if new requested value is "unspecified"
+ if (DfsCpuHighKHz == NvRmFreqUnspecified)
+ DfsCpuHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ if (DfsEmcHighKHz == NvRmFreqUnspecified)
+ DfsEmcHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc];
+
+ // Clip requested CPU and EMC high corner frequencies to domain maximum
+ if (DfsCpuHighKHz > pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz)
+ DfsCpuHighKHz = pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz;
+ if (DfsEmcHighKHz > pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz)
+ DfsEmcHighKHz = pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz;
+
+ // Clip requested CPU and EMC high corner to supported EMC configuration
+ DfsClipCpuEmcHighLimits(
+ hRmDeviceHandle, &DfsCpuHighKHz, &DfsEmcHighKHz);
+
+ // Clip requested CPU and EMC frequencies to domain low limits
+ if (DfsCpuHighKHz < pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu])
+ DfsCpuHighKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ if (DfsEmcHighKHz < pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Emc])
+ DfsEmcHighKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Emc];
+
+ // Finally update high corner (shadow CPU ceiling for envelope restore)
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu] = DfsCpuHighKHz;
+ pDfs->CpuCornersShadow.MaxKHz = DfsCpuHighKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc] = DfsEmcHighKHz;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+// Sets a CPU frequency envelope [low, high] that pins the CPU DFS corners.
+// Unspecified boundaries inherit the current corner (collapsed to the
+// other boundary if that would reverse them); reversed boundaries are
+// rejected with NvError_BadValue. The envelope is considered "set" only
+// when at least one boundary lies strictly inside the CPU domain limits.
+// Requested limits are shadowed before EMC throttling so they can be
+// restored when the EMC envelope is released.
+NvError
+NvRmDfsSetCpuEnvelope(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsCpuLowCornerKHz,
+ NvRmFreqKHz DfsCpuHighCornerKHz)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to set if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Preserve unspecified boundary, unless it violates new setting for
+ // the other one; set both boundaries equal in the latter case
+ if (DfsCpuLowCornerKHz == NvRmFreqUnspecified)
+ {
+ DfsCpuLowCornerKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ if (DfsCpuLowCornerKHz > DfsCpuHighCornerKHz)
+ DfsCpuLowCornerKHz = DfsCpuHighCornerKHz;
+ }
+ if (DfsCpuHighCornerKHz == NvRmFreqUnspecified)
+ {
+ DfsCpuHighCornerKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ if (DfsCpuLowCornerKHz > DfsCpuHighCornerKHz)
+ DfsCpuHighCornerKHz = DfsCpuLowCornerKHz;
+ }
+
+ // Can not set envelope with reversed boundaries
+ if (DfsCpuLowCornerKHz > DfsCpuHighCornerKHz)
+ {
+ NV_ASSERT(!"CPU envelope boundaries are reversed");
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_BadValue;
+ }
+
+ // Clip requested boundaries to CPU domain limits; mark envelope "set" if
+ // any requested boundary is inside the limits
+ pDfs->CpuEnvelopeSet = NV_FALSE; // assume envelope is open
+
+ if (DfsCpuLowCornerKHz <= pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz)
+ DfsCpuLowCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz;
+ else
+ {
+ pDfs->CpuEnvelopeSet = NV_TRUE; // envelope sealed
+ if (DfsCpuLowCornerKHz >= pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz)
+ DfsCpuLowCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz;
+ }
+
+ if (DfsCpuHighCornerKHz >= pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz)
+ DfsCpuHighCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Cpu].MaxKHz;
+ else
+ {
+ pDfs->CpuEnvelopeSet = NV_TRUE; // envelope sealed
+ if (DfsCpuHighCornerKHz <= pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz)
+ DfsCpuHighCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Cpu].MinKHz;
+ }
+ // Shadow new limits before they may be throttled by EMC
+ pDfs->CpuCornersShadow.MinKHz = DfsCpuLowCornerKHz;
+ pDfs->CpuCornersShadow.MaxKHz = DfsCpuHighCornerKHz;
+
+ // If EMC envelope is set, move (throttle) CPU envelope as necessary
+ if (pDfs->EmcEnvelopeSet)
+ {
+ NvRmFreqKHz EmcHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc];
+ DfsClipCpuEmcHighLimits(
+ hRmDeviceHandle, &DfsCpuHighCornerKHz, &EmcHighKHz);
+ NV_ASSERT(EmcHighKHz == pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc]);
+ if (DfsCpuLowCornerKHz > DfsCpuHighCornerKHz)
+ DfsCpuLowCornerKHz = DfsCpuHighCornerKHz;
+ }
+
+ // Finally update CPU limits
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu] = DfsCpuLowCornerKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu] = DfsCpuHighCornerKHz;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+// Sets an EMC frequency envelope [low, high] that pins the EMC DFS
+// corners. Mirrors NvRmDfsSetCpuEnvelope(): unspecified boundaries
+// inherit current corners, reversed boundaries return NvError_BadValue,
+// and the envelope is "set" only when a boundary is strictly inside the
+// EMC domain limits. CPU corners are first restored from their shadow and
+// then re-throttled against the new EMC ceiling when the envelope is set.
+NvError
+NvRmDfsSetEmcEnvelope(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmFreqKHz DfsEmcLowCornerKHz,
+ NvRmFreqKHz DfsEmcHighCornerKHz)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to set if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Preserve unspecified boundary, unless it violates new setting for
+ // the other one; set both boundaries equal in the latter case
+ if (DfsEmcLowCornerKHz == NvRmFreqUnspecified)
+ {
+ DfsEmcLowCornerKHz = pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Emc];
+ if (DfsEmcLowCornerKHz > DfsEmcHighCornerKHz)
+ DfsEmcLowCornerKHz = DfsEmcHighCornerKHz;
+ }
+ if (DfsEmcHighCornerKHz == NvRmFreqUnspecified)
+ {
+ DfsEmcHighCornerKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc];
+ if (DfsEmcLowCornerKHz > DfsEmcHighCornerKHz)
+ DfsEmcHighCornerKHz = DfsEmcLowCornerKHz;
+ }
+
+ // Can not set envelope with reversed boundaries
+ if (DfsEmcLowCornerKHz > DfsEmcHighCornerKHz)
+ {
+ NV_ASSERT(!"EMC envelope boundaries are reversed");
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_BadValue;
+ }
+
+ // Clip requested boundaries to EMC domain limits; mark envelope "set" if
+ // any requested boundary is inside the limits
+ pDfs->EmcEnvelopeSet = NV_FALSE; // assume envelope is open
+
+ if (DfsEmcLowCornerKHz <= pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz)
+ DfsEmcLowCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz;
+ else
+ {
+ pDfs->EmcEnvelopeSet = NV_TRUE; // envelope sealed
+ if (DfsEmcLowCornerKHz >= pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz)
+ DfsEmcLowCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz;
+ }
+
+ if (DfsEmcHighCornerKHz >= pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz)
+ DfsEmcHighCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Emc].MaxKHz;
+ else
+ {
+ pDfs->EmcEnvelopeSet = NV_TRUE; // envelope sealed
+ if (DfsEmcHighCornerKHz <= pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz)
+ DfsEmcHighCornerKHz = pDfs->DfsParameters[NvRmDfsClockId_Emc].MinKHz;
+ }
+
+ // Restore CPU corners from shadow. If set, clip EMC envelope to the supported
+ // EMC configuration, and throttle CPU corners as necessary
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu] = pDfs->CpuCornersShadow.MinKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu] = pDfs->CpuCornersShadow.MaxKHz;
+ if (pDfs->EmcEnvelopeSet)
+ {
+ NvRmFreqKHz CpuHighKHz = pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu];
+ DfsClipCpuEmcHighLimits(
+ hRmDeviceHandle, &CpuHighKHz, &DfsEmcHighCornerKHz);
+ if (DfsEmcLowCornerKHz > DfsEmcHighCornerKHz)
+ DfsEmcLowCornerKHz = DfsEmcHighCornerKHz;
+
+ if (pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu] > CpuHighKHz)
+ {
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Cpu] = CpuHighKHz;
+ if (pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu] > CpuHighKHz)
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Cpu] = CpuHighKHz;
+ }
+ }
+ // Finally update EMC limits
+ pDfs->LowCornerKHz.Domains[NvRmDfsClockId_Emc] = DfsEmcLowCornerKHz;
+ pDfs->HighCornerKHz.Domains[NvRmDfsClockId_Emc] = DfsEmcHighCornerKHz;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+// Manually sets target frequencies for all DFS clock domains. Only valid
+// in the Stopped run state (NvError_NotSupported otherwise). Unspecified
+// entries keep the current h/w frequency; the rest are clipped to the
+// [low corner, high corner] of each domain. The clock-control thread is
+// signalled after the mutex is released to apply the targets.
+NvError
+NvRmDfsSetTarget(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsFreqListCount,
+ const NvRmFreqKHz* pDfsTargetFreqList)
+{
+ NvU32 i;
+ NvRmDfsFrequencies DfsKHz;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT(DfsFreqListCount == NvRmDfsClockId_Num);
+ NV_ASSERT(pDfsTargetFreqList);
+
+ NvRmPrivLockSharedPll();
+ DfsClockFreqGet(hRmDeviceHandle, &DfsKHz);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Do nothing if DFS is not stopped (disabled or running)
+ if (pDfs->DfsRunState != NvRmDfsRunState_Stopped)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ NvRmPrivUnlockSharedPll();
+ return NvError_NotSupported;
+ }
+
+ // Clip requested target frequencies to domain limits
+ // (keep current frequency as a target if new value is "unspecified")
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ NvRmFreqKHz DomainKHz = pDfsTargetFreqList[i];
+ if (DomainKHz != NvRmFreqUnspecified)
+ {
+ if (DomainKHz < pDfs->LowCornerKHz.Domains[i])
+ {
+ DomainKHz = pDfs->LowCornerKHz.Domains[i];
+ }
+ else if (DomainKHz > pDfs->HighCornerKHz.Domains[i])
+ {
+ DomainKHz = pDfs->HighCornerKHz.Domains[i];
+ }
+ DfsKHz.Domains[i] = DomainKHz;
+ }
+ }
+
+ // Set target and signal clock control thread ("manual clock control")
+ pDfs->TargetKHz = DfsKHz;
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ NvRmPrivUnlockSharedPll();
+ NvOsSemaphoreSignal(pDfs->hSemaphore);
+ return NvSuccess;
+}
+
+// Reports utilization info (min/max, corners, current and average kHz)
+// for one DFS clock domain. When DFS is not actively running, the cached
+// current/average values are refreshed directly from hardware first so
+// the report is not stale.
+NvError
+NvRmDfsGetClockUtilization(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmDfsClockId ClockId,
+ NvRmDfsClockUsage* pClockUsage)
+{
+ NvRmDfsFrequencies DfsKHz;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT(pClockUsage);
+ NV_ASSERT((0 < ClockId) && (ClockId < NvRmDfsClockId_Num));
+
+ NvRmPrivLockSharedPll();
+ DfsClockFreqGet(hRmDeviceHandle, &DfsKHz);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // If DFS is not running - update current frequencies directly from h/w
+ if (pDfs->DfsRunState <= NvRmDfsRunState_Stopped)
+ {
+ pDfs->CurrentKHz = DfsKHz;
+ if (pDfs->Samplers[ClockId].MonitorPresent)
+ pDfs->Samplers[ClockId].AverageKHz = DfsKHz.Domains[ClockId];
+ }
+ // Update clock info
+ pClockUsage->MinKHz = pDfs->DfsParameters[ClockId].MinKHz;
+ pClockUsage->MaxKHz = pDfs->DfsParameters[ClockId].MaxKHz;
+ pClockUsage->LowCornerKHz = pDfs->LowCornerKHz.Domains[ClockId];
+ pClockUsage->HighCornerKHz = pDfs->HighCornerKHz.Domains[ClockId];
+ pClockUsage->CurrentKHz = pDfs->CurrentKHz.Domains[ClockId];
+ pClockUsage->AverageKHz = pDfs->Samplers[ClockId].AverageKHz;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ NvRmPrivUnlockSharedPll();
+ return NvSuccess;
+}
+
+// Returns accumulated DFS profiling data (per-profile sample counts and
+// times, plus the derived sampling period). Only meaningful while DFS is
+// in the profiled loop; compiled out entirely (always NotSupported) when
+// DFS_PROFILING is off.
+NvError
+NvRmDfsGetProfileData(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 DfsProfileCount,
+ NvU32* pSamplesNoList,
+ NvU32* pProfileTimeUsList,
+ NvU32* pDfsPeriodUs)
+{
+#if DFS_PROFILING
+ NvU32 i;
+ NvRmDfs* pDfs = &s_Dfs;
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT(pProfileTimeUsList && pSamplesNoList && pDfsPeriodUs);
+ NV_ASSERT(DfsProfileCount == NvRmDfsProfileId_Num);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+ // Nothing to return if DFS is not in profiled loop
+ if (pDfs->DfsRunState != NvRmDfsRunState_ProfiledLoop)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+ // Return profile data (index 0 is not a valid profile id)
+ for (i = 1; i < DfsProfileCount; i++)
+ {
+ pSamplesNoList[i] = s_Profile.SamplesNo[i];
+ pProfileTimeUsList[i] = s_Profile.AccumulatedUs[i];
+ }
+ // Period = window length divided by the number of intervals per window
+ *pDfsPeriodUs = pDfs->SamplingWindow.SampleWindowMs * 1000 /
+ NV_ARRAY_SIZE(pDfs->SamplingWindow.IntervalsMs);
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+
+#else
+
+ return NvError_NotSupported;
+#endif
+}
+
+// Starts (or restarts) DFS activity logging: enables the log flag and
+// zeroes all cumulative counters - per-domain cycle accumulators, elapsed
+// time, LP2 statistics, and (when second-granularity logging is compiled
+// in) the circular log write indices.
+void
+NvRmDfsLogStart(NvRmDeviceHandle hRmDeviceHandle)
+{
+ NvU32 i;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ s_DfsLogOn = NV_TRUE;
+
+ for (i = 1; i < NvRmDfsClockId_Num; i++)
+ {
+ pDfs->Samplers[i].CumulativeLogCycles = 0;
+ }
+ pDfs->SamplingWindow.CumulativeLogMs = 0;
+ pDfs->SamplingWindow.CumulativeLp2TimeMs = 0;
+ pDfs->SamplingWindow.CumulativeLp2Entries = 0;
+
+#if DFS_LOGGING_SECONDS
+ s_DfsLogWrIndex = 0;
+ s_DfsLogStarvationWrIndex = 0;
+ s_DfsLogBusyWrIndex = 0;
+#endif
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+}
+
+// Stops DFS logging and returns mean frequency per domain over the logged
+// interval (cumulative kilocycles / elapsed ms == kHz), plus cumulative
+// LP2 time and entry counts. Returns NvError_NotSupported when DFS is
+// disabled.
+NvError
+NvRmDfsLogGetMeanFrequencies(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 LogMeanFreqListCount,
+ NvRmFreqKHz* pLogMeanFreqList,
+ NvU32* pLogLp2TimeMs,
+ NvU32* pLogLp2Entries)
+{
+ NvU32 i, msec;
+ NvRmDfs* pDfs = &s_Dfs;
+
+ NV_ASSERT(hRmDeviceHandle);
+ NV_ASSERT(pDfs->hIntrMutex);
+ NV_ASSERT(LogMeanFreqListCount == NvRmDfsClockId_Num);
+ NV_ASSERT(pLogMeanFreqList);
+
+ NvOsIntrMutexLock(pDfs->hIntrMutex);
+ s_DfsLogOn = NV_FALSE;
+
+ // No logging if DFS is disabled
+ if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+ {
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvError_NotSupported;
+ }
+
+ // Return cumulative mean frequencies: (Kcycles/ms) * 1000 = kHz;
+ // (if log never started or running more than 49 days return 0)
+ // NOTE(review): msec may be 0 when the log never started - relies on
+ // NvDiv64 returning 0 for a zero divisor per the comment above; confirm
+ // against the NvDiv64 implementation.
+ msec = pDfs->SamplingWindow.CumulativeLogMs;
+ for (i = 1; i < LogMeanFreqListCount; i++)
+ {
+ pLogMeanFreqList[i] =
+ (NvU32)NvDiv64(pDfs->Samplers[i].CumulativeLogCycles, msec);
+ }
+ // TODO: update if condition SystemKHz = AvpKHz changes
+ pLogMeanFreqList[NvRmDfsClockId_System] =
+ pLogMeanFreqList[NvRmDfsClockId_Avp];
+
+ // Return cumulative LP2 statistic
+ *pLogLp2TimeMs = pDfs->SamplingWindow.CumulativeLp2TimeMs;
+ *pLogLp2Entries = pDfs->SamplingWindow.CumulativeLp2Entries;
+
+ NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+ return NvSuccess;
+}
+
+NvError
+NvRmDfsLogActivityGetEntry(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 EntryIndex,
+    NvU32 LogDomainsCount,
+    NvU32* pIntervalMs,
+    NvU32* pLp2TimeMs,
+    NvU32* pActiveCyclesList,
+    NvRmFreqKHz* pAveragesList,
+    NvRmFreqKHz* pFrequenciesList)
+{
+#if DFS_LOGGING_SECONDS
+    NvU32 i;
+    DfsLogEntry* pEntry;
+    NvRmDfs* pDfs = &s_Dfs;
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pDfs->hIntrMutex);
+    NV_ASSERT(pFrequenciesList && pActiveCyclesList && pIntervalMs); // NOTE(review): pAveragesList/pLp2TimeMs are dereferenced below but not asserted
+    NV_ASSERT(LogDomainsCount == NvRmDfsClockId_Num);
+
+    NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+    // No logging if DFS is disabled
+    if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_NotSupported;
+    }
+
+    // Nothing to return if requested entry is at/after the current write index
+    if (EntryIndex >= s_DfsLogWrIndex)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_InvalidAddress;
+    }
+
+    // Return log data
+    NV_ASSERT(EntryIndex < DFS_LOG_SIZE);
+    pEntry = &s_DfsLog[EntryIndex];
+    for (i = 1; i < LogDomainsCount; i++)   // index 0 unused — DFS domain loops in this file start at 1
+    {
+        pFrequenciesList[i] = pEntry->CurrentKHz.Domains[i];
+        pAveragesList[i] = pEntry->AverageKHz.Domains[i];
+        pActiveCyclesList[i] = pEntry->ActiveCycles[i];
+    }
+    *pIntervalMs = pEntry->SampleIntervalMs;
+    *pLp2TimeMs = pEntry->Lp2TimeMs;
+
+    NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+    return NvSuccess;
+
+#else
+
+    return NvError_NotSupported;
+#endif
+}
+
+NvError
+NvRmDfsLogStarvationGetEntry(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 EntryIndex,
+    NvU32* pSampleIndex,
+    NvU32* pClientId,
+    NvU32* pClientTag,
+    NvRmDfsStarvationHint* pStarvationHint)
+{
+#if DFS_LOGGING_SECONDS
+    NvRmDfs* pDfs = &s_Dfs;
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pDfs->hIntrMutex);
+    NV_ASSERT(pSampleIndex && pStarvationHint); // NOTE(review): pClientId/pClientTag are dereferenced below but not asserted
+
+    NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+    // No logging if DFS is disabled
+    if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_NotSupported;
+    }
+
+    // Nothing to return if requested entry is at/after the current write index
+    if (EntryIndex >= s_DfsLogStarvationWrIndex)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_InvalidAddress;
+    }
+
+    // Return log data
+    NV_ASSERT(EntryIndex < DFS_LOG_SIZE);
+    *pSampleIndex = s_DfsLogStarvation[EntryIndex].LogSampleIndex;
+    *pClientId = s_DfsLogStarvation[EntryIndex].ClientId;
+    *pClientTag = s_DfsLogStarvation[EntryIndex].ClientTag;
+    *pStarvationHint = s_DfsLogStarvation[EntryIndex].StarvationHint;
+
+    NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+    return NvSuccess;
+
+#else
+
+    return NvError_NotSupported;
+#endif
+}
+
+NvError
+NvRmDfsLogBusyGetEntry(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvU32 EntryIndex,
+    NvU32* pSampleIndex,
+    NvU32* pClientId,
+    NvU32* pClientTag,
+    NvRmDfsBusyHint* pBusyHint)
+{
+#if DFS_LOGGING_SECONDS
+    NvRmDfs* pDfs = &s_Dfs;
+
+    NV_ASSERT(hRmDeviceHandle);
+    NV_ASSERT(pDfs->hIntrMutex);
+    NV_ASSERT(pSampleIndex && pBusyHint);   // NOTE(review): pClientId/pClientTag are dereferenced below but not asserted
+
+    NvOsIntrMutexLock(pDfs->hIntrMutex);
+
+    // No logging if DFS is disabled
+    if (pDfs->DfsRunState == NvRmDfsRunState_Disabled)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_NotSupported;
+    }
+
+    // Nothing to return if requested entry is at/after the current write index
+    if (EntryIndex >= s_DfsLogBusyWrIndex)
+    {
+        NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+        return NvError_InvalidAddress;
+    }
+
+    // Return log data
+    NV_ASSERT(EntryIndex < DFS_LOG_SIZE);
+    *pSampleIndex = s_DfsLogBusy[EntryIndex].LogSampleIndex;
+    *pClientId = s_DfsLogBusy[EntryIndex].ClientId;
+    *pClientTag = s_DfsLogBusy[EntryIndex].ClientTag;
+    *pBusyHint = s_DfsLogBusy[EntryIndex].BusyHint;
+
+    NvOsIntrMutexUnlock(pDfs->hIntrMutex);
+    return NvSuccess;
+
+#else
+
+    return NvError_NotSupported;
+#endif
+}
+
+/*****************************************************************************/
+// DVS PUBLIC INTERFACES
+/*****************************************************************************/
+
+void
+NvRmDfsGetLowVoltageThreshold(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmDfsVoltageRailId RailId,
+    NvRmMilliVolts* pLowMv,
+    NvRmMilliVolts* pPresentMv)
+{
+    NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+
+    NV_ASSERT(hRmDeviceHandle);     // NOTE(review): pLowMv/pPresentMv are dereferenced below but not asserted
+
+    NvRmPrivLockSharedPll();        // DVS voltage fields are read under the shared PLL lock
+    switch (RailId)
+    {
+        case NvRmDfsVoltageRailId_Core:
+            *pLowMv = pDvs->LowCornerCoreMv;
+            *pPresentMv = pDvs->CurrentCoreMv;
+            break;
+
+        case NvRmDfsVoltageRailId_Cpu:
+            if (NvRmPrivIsCpuRailDedicated(hRmDeviceHandle))
+            {
+                *pLowMv = pDvs->LowCornerCpuMv;
+                *pPresentMv = pDvs->CurrentCpuMv;
+                break;
+            }
+            // fall through
+
+        default:
+            *pLowMv = NvRmVoltsUnspecified;         // unknown rail, or CPU rail is not dedicated
+            *pPresentMv = NvRmVoltsUnspecified;
+            break;
+    }
+    NvRmPrivUnlockSharedPll();
+}
+
+void
+NvRmDfsSetLowVoltageThreshold(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmDfsVoltageRailId RailId,
+    NvRmMilliVolts LowMv)
+{
+    NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+
+    NV_ASSERT(hRmDeviceHandle);
+
+    // Low threshold is not specified - exit
+    if (LowMv == NvRmVoltsUnspecified)
+        return;
+
+    NvRmPrivLockSharedPll();    // DVS voltage fields are modified under the shared PLL lock
+
+    switch (RailId)
+    {
+        case NvRmDfsVoltageRailId_Core:
+            // Clip specified voltage level to core voltage range,
+            // and update low voltage settings
+            if (LowMv > pDvs->NominalCoreMv)
+                LowMv = pDvs->NominalCoreMv;
+            else if (LowMv < pDvs->MinCoreMv)
+                LowMv = pDvs->MinCoreMv;
+            pDvs->LowCornerCoreMv = LowMv;
+            pDvs->UpdateFlag = NV_TRUE;     // request core voltage re-evaluation
+            break;
+
+        case NvRmDfsVoltageRailId_Cpu:
+            if (NvRmPrivIsCpuRailDedicated(hRmDeviceHandle))    // CPU threshold applies only with a dedicated CPU rail
+            {
+                // Clip specified voltage level to CPU voltage range,
+                // and update low voltage settings
+                if (LowMv > pDvs->NominalCpuMv)
+                    LowMv = pDvs->NominalCpuMv;
+                else if (LowMv < pDvs->MinCpuMv)
+                    LowMv = pDvs->MinCpuMv;
+                pDvs->LowCornerCpuMv = LowMv;
+                pDvs->UpdateFlag = NV_TRUE;
+            }
+            break;
+
+        default:
+            break;      // unknown rail id is silently ignored
+    }
+    NvRmPrivUnlockSharedPll();
+}
+
+/*****************************************************************************/
+// DTT PUBLIC INTERFACES
+/*****************************************************************************/
+
+NvError
+NvRmDiagGetTemperature(
+    NvRmDeviceHandle hRmDeviceHandle,
+    NvRmTmonZoneId ZoneId,
+    NvS32* pTemperatureC)
+{
+    NvRmDtt* pDtt = &s_Dfs.ThermalThrottler;
+
+    NV_ASSERT(hRmDeviceHandle);
+
+    switch (ZoneId)
+    {
+        case NvRmTmonZoneId_Core:
+            if (pDtt->hOdmTcore)    // core TMON device present?
+            {
+                if (NvOdmTmonTemperatureGet(pDtt->hOdmTcore, pTemperatureC))
+                    return NvSuccess;
+                return NvError_Busy;    // TMON read did not produce a value
+            }
+            // fall through
+        default:
+            return NvError_NotSupported;    // no TMON support for this zone
+    }
+}
+
+/*****************************************************************************/
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.h
new file mode 100644
index 000000000000..21d7c9e52765
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Power Resource manager </b>
+ *
+ * @b Description: NvRM DFS manager definitions.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_POWER_DFS_H
+#define INCLUDED_NVRM_POWER_DFS_H
+
+#include "nvrm_power_private.h"
+#include "nvrm_clocks.h"
+#include "nvrm_interrupt.h"
+#include "nvodm_tmon.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+/**
+ * Sampling window definitions:
+ * - minimum and maximum sampling interval in ms
+ * - maximum number of intervals in the sampling window
+ * (always defined as power of 2 to simplify calculations)
+ */
+#define NVRM_DFS_MIN_SAMPLE_MS (10)
+#define NVRM_DFS_MAX_SAMPLE_MS (20)
+
+#define NVRM_DFS_MAX_SAMPLES_LOG2 (7)
+#define NVRM_DFS_MAX_SAMPLES (0x1 << NVRM_DFS_MAX_SAMPLES_LOG2)
+
+/// Specifies that CPU idle monitor readings should be explicitly offset
+/// by time spent in LP2
+#define NVRM_CPU_IDLE_LP2_OFFSET (1)
+
+/// Number of bits in the fractional part of boost coefficients
+#define BOOST_FRACTION_BITS (8)
+
+/*****************************************************************************/
+
+/// Enumerates synchronous busy hints states
+typedef enum
+{
+ NvRmDfsBusySyncState_Idle = 0,
+ NvRmDfsBusySyncState_Signal,
+ NvRmDfsBusySyncState_Execute,
+
+ NvRmDfsBusySyncState_Num,
+ NvRmDfsBusySyncState_Force32 = 0x7FFFFFFF
+} NvRmDfsBusySyncState;
+
+/// Enumerates DFS modules, i.e. the modules that include activity monitors
+/// for clock domains controlled by DFS
+typedef enum
+{
+ // Specifies system statistic module - includes activity monitors
+ // for CPU, AVP, AHB, and APB clock domains
+ NvRmDfsModuleId_Systat = 1,
+
+ // Specifies VDE module - includes activity monitor
+ // for video-pipe clock domain
+ NvRmDfsModuleId_Vde,
+
+ // Specifies EMC module - includes activity monitor
+ // for EMC 1x clock domain
+ NvRmDfsModuleId_Emc,
+
+ NvRmDfsModuleId_Num,
+ NvRmDfsModuleId_Force32 = 0x7FFFFFFF
+} NvRmDfsModuleId;
+
+/**
+ * Combines idle count readings from DFS activity monitors during current
+ * sample interval
+ */
+typedef struct NvRmDfsIdleDataRec
+{
+ // Current Sample interval in ms
+ NvU32 CurrentIntervalMs;
+
+ // Data readings from DFS activity monitors
+ NvU32 Readings[NvRmDfsClockId_Num];
+
+ // Time spent in LP2 in ms
+ NvU32 Lp2TimeMs;
+} NvRmDfsIdleData;
+
+/**
+ * DFS module access function pointers
+ */
+typedef struct NvRmDfsRec* NvRmDfsPtr;
+typedef const struct NvRmDfsRec* NvRmConstDfsPtr;
+typedef NvError (*FuncPtrModuleMonitorsInit)(NvRmDfsPtr pDfs);
+typedef void (*FuncPtrModuleMonitorsDeinit)(NvRmDfsPtr pDfs);
+
+typedef void
+(*FuncPtrModuleMonitorsStart)(
+    NvRmConstDfsPtr pDfs,
+    const NvRmDfsFrequencies* pDfsKHz,
+    const NvU32 IntervalMs);    // fixed typo (was "IntevalMs"); typedef parameter names are documentation only
+
+typedef void
+(*FuncPtrModuleMonitorsRead)(
+ NvRmConstDfsPtr pDfs,
+ const NvRmDfsFrequencies* pDfsKHz,
+ NvRmDfsIdleData* pIdleData);
+
+/**
+ * Combines capabilities, access function pointers, and base virtual
+ * addresses of the DFS module
+ */
+typedef struct NvRmDfsModuleRec
+{
+ // Clock domains monitored by this module
+ NvBool DomainMap[NvRmDfsClockId_Num];
+
+ // Pointer to the function that initializes module activity monitors
+ // (null if module is not present)
+ FuncPtrModuleMonitorsInit Init;
+
+ // Pointer to the function that de-initializes module activity monitors
+ // (null if module is not present)
+ FuncPtrModuleMonitorsDeinit Deinit;
+
+ // Pointer to the function that starts module activity monitors
+ // (null if module is not present)
+ FuncPtrModuleMonitorsStart Start;
+
+ // Pointer to the function that reads module activity monitors
+ // (null if module is not present)
+ FuncPtrModuleMonitorsRead Read;
+
+ // Monitor readouts scale and offset (usage and interpretation may differ
+ // for different monitors)
+ NvU32 Scale;
+ NvU32 Offset;
+
+ // Base virtual address for module registers
+ void* pBaseReg;
+} NvRmDfsModule;
+
+/*****************************************************************************/
+
+/**
+ * Combines DFS starvation control parameters
+ */
+typedef struct NvRmDfsStarveParamRec
+{
+ // Fixed increase in frequency boost for a sample interval the clock
+ // consumer is starving: new boost = old boost + BoostStepKHz
+ NvRmFreqKHz BoostStepKHz;
+
+ // Proportional increase in frequency boost for a sample interval the
+ // clock consumer is starving (scaled in 0-255 range):
+ // new boost = old boost + old boost * BoostIncKoef / 256
+ NvU8 BoostIncKoef;
+
+ // Proportional decrease in frequency boost for a sample interval the
+ // clock consumer is not starving (scaled in 0-255 range):
+ // new boost = old boost - old boost * BoostDecKoef / 256
+ NvU8 BoostDecKoef;
+} NvRmDfsStarveParam;
+
+
+/**
+ * Combines scaling algorithm parameters for DFS controlled clock domain
+ */
+typedef struct NvRmDfsParamRec
+{
+ // Maximum domain clock frequency
+ NvRmFreqKHz MaxKHz;
+ // Minimum domain clock frequency
+ NvRmFreqKHz MinKHz;
+
+ // Minimum average activity change in upward direction recognized by DFS
+ NvRmFreqKHz UpperBandKHz;
+ // Minimum average activity change in downward direction recognized by DFS
+ NvRmFreqKHz LowerBandKHz;
+
+ // Control parameters for real time starvation reported by the DFS client
+ NvRmDfsStarveParam RtStarveParam;
+
+ // Control parameters for non real time starvation detected by DFS itself
+ NvRmDfsStarveParam NrtStarveParam;
+
+ // Relative adjustment up of average activity applied by DFS:
+ // adjusted frequency = measured average activity * (1 + 2^(-RelAdjustBits))
+ NvU8 RelAdjustBits;
+
+ // Minimum number of sample intervals in a row with non-realtime starvation
+ // that triggers frequency boost (0 = boost trigger on the 1st NRT interval)
+ NvU8 MinNrtSamples;
+
+ // Minimum number of idle cycles in the sample interval required to avoid
+ // non-realtime starvation
+ NvU32 MinNrtIdleCycles;
+} NvRmDfsParam;
+
+/**
+ * Combines sampling statistic and starvation controls for DFS clock domain
+ */
+typedef struct NvRmDfsSamplerRec
+{
+ // Domain clock id
+ NvRmDfsClockId ClockId;
+
+ // Activity monitor present indicator (domain is still controlled by DFS
+ // even if no activity monitor present)
+ NvBool MonitorPresent;
+
+ // Circular buffer of active cycles per sample interval within the
+ // sampling window
+ NvU32 Cycles[NVRM_DFS_MAX_SAMPLES];
+
+ // Pointer to the last ("recent") sample in the sampling window
+ NvU32* pLastSample;
+
+ // Total number of active cycles in the sampling window
+ NvU64 TotalActiveCycles;
+
+ // Measured average clock activity frequency over the sampling window
+ NvRmFreqKHz AverageKHz;
+
+ // Average clock frequency adjusted up by DFS
+ NvRmFreqKHz BumpedAverageKHz;
+
+ // Non-real time starving sample counter
+ NvU32 NrtSampleCounter;
+
+ // Non-real time starvation boost
+ NvRmFreqKHz NrtStarveBoostKHz;
+
+ // Real time starvation boost
+ NvRmFreqKHz RtStarveBoostKHz;
+
+ // Busy pulse mode indicator - if true, busy boost is completely removed
+ // after busy time has expired; if false, DFS averaging mechanism is used
+ // to gradually lower frequency after busy boost
+ NvBool BusyPulseMode;
+
+ // Cumulative number of cycles since log start
+ NvU64 CumulativeLogCycles;
+} NvRmDfsSampler;
+
+/**
+ * Holds information for DFS moving sampling window
+ */
+typedef struct NvRmDfsSampleWindowRec
+{
+ // Minimum sampling interval
+ NvU32 MinIntervalMs;
+
+ // Maximum sampling interval
+ NvU32 MaxIntervalMs;
+
+ // Next sample interval
+ NvU32 NextIntervalMs;
+
+ // Circular buffer of sample intervals in the sampling window
+ NvU32 IntervalsMs[NVRM_DFS_MAX_SAMPLES];
+
+    // Pointer to the last ("recent") sample interval in the sampling window
+ NvU32* pLastInterval;
+
+ // Cumulative width of the sampling window
+ NvU32 SampleWindowMs;
+
+ // Last busy hints check time stamp
+ NvU32 BusyCheckLastUs;
+
+ // Delay before busy hints next check
+ NvU32 BusyCheckDelayUs;
+
+ // Free running sample counter
+ NvU32 SampleCnt;
+
+ // Cumulative DFS time since log start
+ NvU32 CumulativeLogMs;
+
+ // Cumulative LP2 statistic since log start
+ NvU32 CumulativeLp2TimeMs;
+ NvU32 CumulativeLp2Entries;
+} NvRmDfsSampleWindow;
+
+/*****************************************************************************/
+
+/**
+ * Holds voltage corner for DFS domains and non-DFS modules. Each voltage
+ * corner field specifies minimum core voltage required to run the respective
+ * device(s) at current clock frequency.
+ */
+typedef struct NvRmDvsCornerRec
+{
+ // CPU voltage requirements
+ NvRmMilliVolts CpuMv;
+
+ // AVP/System voltage requirements
+ NvRmMilliVolts SystemMv;
+
+ // EMC / DDR voltage requirements
+ NvRmMilliVolts EmcMv;
+
+ // Cumulative voltage requirements for non-DFS modules
+ NvRmMilliVolts ModulesMv;
+} NvRmDvsCorner;
+
+/**
+ * Combines voltage threshold and core rail status and control information
+ */
+typedef struct NvRmDvsRec
+{
+ // Current DVS voltage thresholds
+ NvRmDvsCorner DvsCorner;
+
+ // RTC (AO) rail address (PMU voltage id)
+ NvU32 RtcRailAddress;
+
+ // Core rail address (PMU voltage id)
+ NvU32 CoreRailAddress;
+
+ // Current core rail voltage
+ NvRmMilliVolts CurrentCoreMv;
+
+ // Nominal core rail voltage
+ NvRmMilliVolts NominalCoreMv;
+
+ // Minimum core rail voltage
+ NvRmMilliVolts MinCoreMv;
+
+ // Low corner voltage for core rail loaded by DVS control API
+ NvRmMilliVolts LowCornerCoreMv;
+
+ // Dedicated Cpu rail address (PMU voltage id)
+ NvU32 CpuRailAddress;
+
+ // Current dedicated CPU rail voltage
+ NvRmMilliVolts CurrentCpuMv;
+
+ // Nominal dedicated CPU rail voltage
+ NvRmMilliVolts NominalCpuMv;
+
+ // Minimum dedicated CPU rail voltage
+ NvRmMilliVolts MinCpuMv;
+
+ // Low corner voltage for CPU rail loaded by DVS control API
+ NvRmMilliVolts LowCornerCpuMv;
+
+ // OTP (default) dedicated CPU rail voltage
+ NvRmMilliVolts CpuOTPMv;
+
+ // Specifies whether or not CPU voltage will switch back to OTP
+ // (default) value after CPU request On-Off-On transition
+ NvBool VCpuOTPOnWakeup;
+
+ // RAM timing SVOP controls low voltage threshold
+ NvRmMilliVolts LowSvopThresholdMv;
+
+ // RAM timing SVOP controls low voltage setting
+ NvU32 LowSvopSettings;
+
+ // RAM timing SVOP controls high voltage setting
+ NvU32 HighSvopSettings;
+
+ // Request core voltage update
+ volatile NvBool UpdateFlag;
+
+ // Stop voltage scaling flag
+ volatile NvBool StopFlag;
+
+ // CPU LP2 state indicator (used on platforms with dedicated CPU rail that
+ // returns to default setting by PMU underneath DVFS on every LP2 exit)
+ volatile NvBool Lp2SyncOTPFlag;
+} NvRmDvs;
+
+/**
+ * Combines status and control information for dynamic thermal throttling
+ */
+typedef struct NvRmDttRec
+{
+ // SoC core temperature monitor (TMON) handle
+ NvOdmTmonDeviceHandle hOdmTcore;
+
+ // Core TMON out-of-limit-interrupt handle
+ NvOdmTmonIntrHandle hOdmTcoreIntr;
+
+ // Core TMON capabilities
+ NvOdmTmonCapabilities TcoreCaps;
+
+    // Out-of-limit interrupt capabilities for low limit
+ NvOdmTmonParameterCaps TcoreLowLimitCaps;
+
+    // Out-of-limit interrupt capabilities for high limit
+ NvOdmTmonParameterCaps TcoreHighLimitCaps;
+
+ // Last TMON reading time stamp
+ NvU32 RdTimeUs;
+
+ // TMON reading period
+ NvU32 RdIntervalUs;
+
+ // Core temperature
+ NvS32 CoreTemperatureC;
+
+ // Request core temperature update
+ volatile NvBool UpdateFlag;
+
+ // Specifies if out-of-limit interrupt is used for temperature update
+ volatile NvBool UseIntr;
+} NvRmDtt;
+
+/*****************************************************************************/
+
+/**
+ * Combines DFS status and control information
+ */
+typedef struct NvRmDfsRec
+{
+ // RM Device handle
+ NvRmDeviceHandle hRm;
+
+ // DFS state variable
+ NvRmDfsRunState DfsRunState;
+
+ // DFS state saved on system suspend entry
+ NvRmDfsRunState DfsLPxSavedState;
+
+ // ID assigned to DFS by RM Power Manager
+ NvU32 PowerClientId;
+
+ // DFS low power corner hit status - true, when all domains (with
+ // possible exception of CPU) are running at minimum frequency
+ NvBool LowCornerHit;
+
+ // Request to report low corner hit status to OS adaptation layer; DFS
+ // interrupt will not wake CPU if it is power gated and low corner is hit
+ NvBool LowCornerReport;
+
+ // PM thread request for CPU state control
+ NvRmPmRequest PmRequest;
+
+ // DFS IRQ number
+ NvU16 IrqNumber;
+
+ // DFS mutex for safe data access by DFS ISR,
+ // clock control thread, and API threads
+ NvOsIntrMutexHandle hIntrMutex;
+
+ // DFS mutex for synchronous busy hints
+ NvOsMutexHandle hSyncBusyMutex;
+
+ // DFS semaphore for synchronous busy hints
+ NvOsSemaphoreHandle hSyncBusySemaphore;
+
+ // Synchronous busy hints state
+ volatile NvRmDfsBusySyncState BusySyncState;
+
+ // Clock control execution thread init indicator
+ volatile NvBool InitializedThread;
+
+ // Clock control execution thread abort indicator
+ volatile NvBool AbortThread;
+
+ // DFS semaphore for sampling interrupt and wake event signaling
+ NvOsSemaphoreHandle hSemaphore;
+
+ // DFS Modules
+ NvRmDfsModule Modules[NvRmDfsModuleId_Num];
+
+ // DFS algorithm parameters
+ NvRmDfsParam DfsParameters[NvRmDfsClockId_Num];
+
+ // DFS Samplers
+ NvRmDfsSampler Samplers[NvRmDfsClockId_Num];
+
+ // DFS sampling window
+ NvRmDfsSampleWindow SamplingWindow;
+
+ // Maximum DFS domains frequencies (shortcut to the respective parameters)
+ NvRmDfsFrequencies MaxKHz;
+
+    // Target DFS domains frequencies: output of the DFS algorithm,
+ // input to clock control
+ NvRmDfsFrequencies TargetKHz;
+
+ // Current DFS domains frequencies: output from clock control, input
+ // to DFS algorithm
+ NvRmDfsFrequencies CurrentKHz;
+
+ // DFS domains frequencies set on entry to suspend state
+ NvRmDfsFrequencies SuspendKHz;
+
+ // Busy boost frequencies requested by Busy load API
+ NvRmDfsFrequencies BusyKHz;
+
+ // Low corner frequencies loaded by DFS control API
+ NvRmDfsFrequencies LowCornerKHz;
+
+ // High corner frequencies loaded by DFS control API
+ NvRmDfsFrequencies HighCornerKHz;
+
+ // A shadow of CPU corners (updated by APIs that directly set CPU corners,
+ // preserved when CPU corners are indirectly throttled by EMC envelope)
+ NvRmModuleClockLimits CpuCornersShadow;
+
+    // CPU envelope API indicator (if set supersedes low/high corner APIs)
+ NvBool CpuEnvelopeSet;
+
+    // EMC envelope API indicator (if set supersedes low/high corner APIs)
+ NvBool EmcEnvelopeSet;
+
+ // Voltage Scaler
+ NvRmDvs VoltageScaler;
+
+ // Thermal throttler
+ NvRmDtt ThermalThrottler;
+
+ // nvos interrupt handle for DVS
+ NvOsInterruptHandle DfsInterruptHandle;
+} NvRmDfs;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_POWER_DFS_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_private.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_private.h
new file mode 100644
index 000000000000..692599040a93
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_private.h
@@ -0,0 +1,549 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_POWER_PRIVATE_H
+#define INCLUDED_NVRM_POWER_PRIVATE_H
+
+#include "nvrm_power.h"
+#include "nvodm_query.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+// Power detect cell stabilization delay
+#define NVRM_PWR_DET_DELAY_US (3)
+
+// Minimum DFS clock domain busy time and busy hints list purge time
+#define NVRM_DFS_BUSY_MIN_MS (10)
+#define NVRM_DFS_BUSY_PURGE_MS (500)
+
+// Temporary definitions for AP20 bring up
+#define NVRM_POWER_AP20_BRINGUP_RETURN(hRm, cond) \
+ if (((hRm)->ChipId.Id == 0x20) && ((cond))) \
+ return
+
+/**
+ * Defines the DFS status flags used by OS kernel to configure SoC for
+ * low power state (multiple flags can be OR'ed).
+ */
+typedef enum
+{
+ // Pause DFS during low power state
+ NvRmDfsStatusFlags_Pause = 0x01,
+
+ // Stop PLL during low power state
+ NvRmDfsStatusFlags_StopPllM0 = 0x02,
+ NvRmDfsStatusFlags_StopPllC0 = 0x04,
+ NvRmDfsStatusFlags_StopPllP0 = 0x08,
+ NvRmDfsStatusFlags_StopPllA0 = 0x10,
+ NvRmDfsStatusFlags_StopPllD0 = 0x20,
+ NvRmDfsStatusFlags_StopPllU0 = 0x40,
+ NvRmDfsStatusFlags_StopPllX0 = 0x80,
+
+ NvRmDfsStatusFlags_Force32 = 0x7FFFFFFF
+} NvRmDfsStatusFlags;
+
+// Defines maximum number of CPUs (must be power of 2)
+#define NVRM_MAX_NUM_CPU_LOG2 (8)
+
+/**
+ * Defines RM power manager requests to OS kernel
+ */
+typedef enum
+{
+ NvRmPmRequest_None = 0,
+
+ // The CPU number is interpreted based on the request flag it is
+ // combined (ORed) with
+ NvRmPmRequest_CpuNumMask = (0x1 << NVRM_MAX_NUM_CPU_LOG2) - 1,
+
+ // Request to abort RM power manager (CPU number is ignored)
+ NvRmPmRequest_ExitFlag,
+
+ // Request to turn On/Off CPU (CPU number specifies target
+ // CPU within current CPU cluster)
+ NvRmPmRequest_CpuOnFlag = NvRmPmRequest_ExitFlag << 1,
+ NvRmPmRequest_CpuOffFlag = NvRmPmRequest_CpuOnFlag << 1,
+
+ // Request to switch between CPU clusters (CPU number specifies target
+ // CPU cluster)
+ NvRmPmRequest_CpuClusterSwitchFlag = NvRmPmRequest_CpuOffFlag << 1,
+
+ NvRmPmRequest_Force32 = 0x7FFFFFFF
+} NvRmPmRequest;
+
+/**
+ * NVRM PM function called within OS shim high priority thread
+ */
+NvRmPmRequest NvRmPrivPmThread(void);
+
+/**
+ * Sets combined RM clients power state in the storage shared with OS
+ * adaptation layer (OAL). While the system is running RM power manger
+ * calls this function to specify idle or active state based on client
+ * requests. On entry to system low power state OAL calls this function
+ * to store the respective LPx id.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param RmState The overall power state to be set
+ */
+void
+NvRmPrivPowerSetState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvRmPowerState RmState);
+
+/**
+ * Reads combined RM clients power state from the storage shared with OS
+ * adaptation layer (OAL). While the system is running both RM and OAL may
+ * call this function to read the power state. On exit from the system low
+ * power state OAL uses this function to find out which LPx state is exited.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return RM power state
+ */
+NvRmPowerState
+NvRmPrivPowerGetState(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Updates DFS pause flag in the storage shared by RM and NV boot loader
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param Pause If NV_TRUE, set DFS pause flag,
+ * if NV_FALSE, clear DFS pause flag
+ *
+ */
+void
+NvRmPrivUpdateDfsPauseFlag(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvBool Pause);
+
+/**
+ * Reads DFS status flags from the storage shared by RM and NV boot loader.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return DFS status flags as defined @NvRmDfsStatusFlags
+ */
+NvU32
+NvRmPrivGetDfsFlags(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Sets download transport in the storage shared by RM and NV boot loader
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param Transport current download transport (NvOdmDownloadTransport_None
+ * if no transport or it is not active)
+ */
+void
+NvRmPrivSetDownloadTransport(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvOdmDownloadTransport Transport);
+
+/**
+ * Reads download transport from the storage shared by RM and NV boot loader.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return current download transport (NvOdmDownloadTransport_None
+ * if no transport or it is not active)
+ */
+NvOdmDownloadTransport
+NvRmPrivGetDownloadTransport(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Save LP2 time in the storage shared by RM and NV boot loader.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param TimeUS Time in microseconds CPU was in LP2 state (power gated)
+ */
+void
+NvRmPrivSetLp2TimeUS(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 TimeUS);
+
+/**
+ * Reads LP2 time from the storage shared by RM and NV boot loader.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return Time in microseconds CPU was in LP2 state (power gated)
+ */
+NvU32
+NvRmPrivGetLp2TimeUS(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Initializes RM access to the storage shared by RM and NV boot loader
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure
+ */
+NvError NvRmPrivOalIntfInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Deinitializes RM access to the storage shared by RM and NV boot loader
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivOalIntfDeinit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Initializes RM DFS manager
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure
+ */
+NvError NvRmPrivDfsInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Deinitializes RM DFS manager
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivDfsDeinit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Initializes RM DTT manager
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivDttInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Deinitializes RM DTT manager
+ */
+void NvRmPrivDttDeinit(void);
+
+/**
+ * Initializes RM power manager
+ *
+ * @param hRmDeviceHandle The RM device handle
+ *
+ * @return NvSuccess if initialization completed successfully
+ * or one of common error codes on failure
+ */
+NvError
+NvRmPrivPowerInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Deinitializes RM power manager
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void
+NvRmPrivPowerDeinit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Initializes IO power rails control
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivIoPowerControlInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Starts IO power rails level detection
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param PwrDetMask The bit mask of power detection cells to be activated
+ */
+void NvRmPrivIoPowerDetectStart(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PwrDetMask);
+
+/**
+ * Resets enabled power detect cells (chip-specific).
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivAp15IoPowerDetectReset(NvRmDeviceHandle hRmDeviceHandle);
+void NvRmPrivAp20IoPowerDetectReset(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Latches the results of IO power rails level detection
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivIoPowerDetectLatch(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Enables/Disables IO pads on specified power rails
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param NoIoPwrMask Bit mask of affected power rails
+ * @param Enable Set NV_TRUE to enable IO pads, or NV_FALSE to disable.
+ */
+void NvRmPrivIoPowerControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 NoIoPwrMask,
+ NvBool Enable);
+
+/**
+ * Configures SoC power rail controls for the upcoming PMU voltage transition.
+ *
+ * @note Should be called just before PMU rail On/Off, or Off/On transition.
+ * Should not be called if rail voltage level is changing within On range.
+ *
+ * @param hRmDeviceHandle The RM device handle.
+ * @param PmuRailAddress PMU address (id) for targeted power rail.
+ * @param Enable Set NV_TRUE if target voltage is about to be turned On, or
+ * NV_FALSE if target voltage is about to be turned Off.
+ * @param pIoPwrDetectMask A pointer to a variable filled with the bit mask
+ * of activated IO power detection cells to be latched by the caller after
+ * Off/On transition (set to 0 for On/Off transition).
+ * @param pNoIoPwrMask A pointer to a variable filled with the bit mask of IO
+ * power pads to be enabled by the caller after Off/On transition (set to 0
+ * for On/Off transition).
+ */
+void
+NvRmPrivSetSocRailPowerState(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PmuRailAddress,
+ NvBool Enable,
+ NvU32* pIoPwrDetectMask,
+ NvU32* pNoIoPwrMask);
+
+/**
+ * Initializes core SoC power rail.
+ *
+ * @param hDevice The Rm device handle.
+ */
+void NvRmPrivCoreVoltageInit(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Request nominal core (and rtc) voltage.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void
+NvRmPrivSetNominalCoreVoltage(NvRmDeviceHandle hRmDevice);
+
+/**
+ * Initializes power group control table (chip-specific)
+ *
+ * @param pPowerGroupIdsTable A pointer to a pointer which this function sets
+ * to the chip specific map between power group number and power gate ID.
+ * @param pPowerGroupIdsTableSize A pointer to a variable which this function
+ * sets to the power group IDs table size.
+ *
+ */
+void
+NvRmPrivAp15PowerGroupTableInit(
+ const NvU32** pPowerGroupIdsTable,
+ NvU32* pPowerGroupIdsTableSize);
+
+void
+NvRmPrivAp20PowerGroupTableInit(
+ const NvU32** pPowerGroupIdsTable,
+ NvU32* pPowerGroupIdsTableSize);
+
+/**
+ * Initializes power group control.
+ *
+ * @param hRmDeviceHandle The RM device handle
+ */
+void NvRmPrivPowerGroupControlInit(NvRmDeviceHandle hRmDeviceHandle);
+
+/**
+ * Enables/disables power for the specified power group
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param PowerGroup targeted power group
+ * @param Enable If NV_TRUE, enable power to the specified power group,
+ * if NV_FALSE, disable power (power gate) the specified power group
+ */
+void
+NvRmPrivPowerGroupControl(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PowerGroup,
+ NvBool Enable);
+
+/**
+ * Retrieves given power group voltage
+ *
+ * @param hRmDeviceHandle The RM device handle
+ * @param PowerGroup targeted power group
+ *
+ * @return NvRmVoltsUnspecified if power group is On,
+ * and NvRmVoltsOff if it is power gated
+ */
+NvRmMilliVolts
+NvRmPrivPowerGroupGetVoltage(
+ NvRmDeviceHandle hRmDeviceHandle,
+ NvU32 PowerGroup);
+
+/**
+ * Verifies if the specified DFS clock domain is starving.
+ *
+ * @param ClockId The DFS ID of the clock domain to be checked.
+ *
+ * @retval NV_TRUE if domain is starving
+ * @retval NV_FALSE if domain is not starving
+ */
+NvBool NvRmPrivDfsIsStarving(NvRmDfsClockId ClockId);
+
+/**
+ * Gets current busy boost frequency and pulse mode requested for the
+ * specified DFS clock domain.
+ *
+ * @param ClockId The DFS ID of the targeted clock domain.
+ * @param pBusyKHz A pointer to a variable filled with boost frequency in kHz.
+ * @param pBusyPulseMode A pointer to a variable filled with pulse mode indicator.
+ * @param pBusyExpireMs A pointer to a variable filled with busy boost
+ * expiration interval in ms.
+ */
+void NvRmPrivDfsGetBusyHint(
+ NvRmDfsClockId ClockId,
+ NvRmFreqKHz* pBusyKHz,
+ NvBool* pBusyPulseMode,
+ NvU32* pBusyExpireMs);
+
+/**
+ * Gets maximum frequency for the specified DFS clock domain.
+ *
+ * @param ClockId The DFS ID of the targeted clock domain.
+ *
+ * @return Maximum domain frequency in kHz
+ */
+NvRmFreqKHz NvRmPrivDfsGetMaxKHz(NvRmDfsClockId ClockId);
+
+/**
+ * Gets minimum frequency for the specified DFS clock domain.
+ *
+ * @param ClockId The DFS ID of the targeted clock domain.
+ *
+ * @return Minimum domain frequency in kHz
+ */
+NvRmFreqKHz NvRmPrivDfsGetMinKHz(NvRmDfsClockId ClockId);
+
+/**
+ * Signals DFS clock control thread
+ *
+ * @param Mode Synchronization mode. In synchronous mode this function returns
+ * to the caller after DFS clock control procedure is executed (blocking call).
+ * In asynchronous mode returns immediately after control thread is signaled.
+ */
+void NvRmPrivDfsSignal(NvRmDfsBusyHintSyncMode Mode);
+
+/**
+ * Synchronize DFS samplers with current clock frequencies
+ */
+void NvRmPrivDfsResync(void);
+
+/**
+ * Gets DFS ready for low power state entry.
+ *
+ * @param state Target low power state.
+ *
+ */
+void NvRmPrivDfsSuspend(NvOdmSocPowerState state);
+
+/**
+ * Restore clock sources after exit from low power state.
+ *
+ * @param hRmDevice The RM device handle.
+ */
+void
+NvRmPrivClocksResume(NvRmDeviceHandle hRmDevice);
+
+
+/**
+ * Initializes DVS settings
+ */
+void NvRmPrivDvsInit(void);
+
+/**
+ * Scales core voltage according to DFS controlled clock frequencies.
+ *
+ * @param BeforeFreqChange Indicates whether this function is called
+ * before (NV_TRUE) or after (NV_FALSE) frequency change.
+ * @param CpuMv Core voltage in mV required to run CPU at clock source
+ * frequency selected by DFS.
+ * @param SystemMv Core voltage in mV required to run AVP/System at clock
+ * source frequency selected by DFS.
+ * @param EmcMv Core voltage in mV required to run EMC/DDR at clock source
+ * frequency selected by DFS.
+ */
+void NvRmPrivVoltageScale(
+ NvBool BeforeFreqChange,
+ NvRmMilliVolts CpuMv,
+ NvRmMilliVolts SystemMv,
+ NvRmMilliVolts EmcMv);
+
+/**
+ * Requests core voltage update.
+ *
+ * @param TargetMv Requested core voltage level in mV.
+ */
+void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv);
+
+/**
+ * Outputs debug messages for starvation hints sent by the specified client.
+ *
+ * @param ClientId The client ID assigned by the RM power manager.
+ * @param ClientTag The client tag reported to the RM power manager.
+ * @param pMultiHint Pointer to a list of starvation hints sent by the client.
+ * @param NumHints Number of entries in the pMultiHint list.
+ *
+ */
+void NvRmPrivStarvationHintPrintf(
+ NvU32 ClientId,
+ NvU32 ClientTag,
+ const NvRmDfsStarvationHint* pMultiHint,
+ NvU32 NumHints);
+
+/**
+ * Outputs debug messages for busy hints sent by the specified client.
+ *
+ * @param ClientId The client ID assigned by the RM power manager.
+ * @param ClientTag The client tag reported to the RM power manager.
+ * @param pMultiHint Pointer to a list of busy hints sent by the client.
+ * @param NumHints Number of entries in the pMultiHint list.
+ *
+ */
+void NvRmPrivBusyHintPrintf(
+ NvU32 ClientId,
+ NvU32 ClientTag,
+ const NvRmDfsBusyHint* pMultiHint,
+ NvU32 NumHints);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_POWER_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_priv_ap_general.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_priv_ap_general.h
new file mode 100644
index 000000000000..ded480bdb41d
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_priv_ap_general.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+ /** @file
+ *
+ * @b Description: Contains the maximum instance of the controller on soc.
+ * Must be >= the max of all chips.
+ */
+
+#ifndef INCLUDED_NVRM_PRIV_AP_GENERAL_H
+#define INCLUDED_NVRM_PRIV_AP_GENERAL_H
+
+
+// Dma specific definitions for latest SOC
+
+// Maximum number of DMA channels available on SOC.
+#define MAX_APB_DMA_CHANNELS 32
+
+
+// SPI specific definitions for latest SOC
+#define MAX_SPI_CONTROLLERS 8
+
+#define MAX_SLINK_CONTROLLERS 8
+
+
+// I2C specific definitions for latest soc
+#define MAX_I2C_CONTROLLERS 3
+
+#define MAX_DVC_CONTROLLERS 1
+
+#endif // INCLUDED_NVRM_PRIV_AP_GENERAL_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_processor.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_processor.h
new file mode 100644
index 000000000000..48c8f57e9f64
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_processor.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_PROCESSOR_H
+#define INCLUDED_NVRM_PROCESSOR_H
+
+#include "nvcommon.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+//==========================================================================
+// ARM CPSR/SPSR definitions
+//==========================================================================
+
+// Processor mode field occupies CPSR bits [4:0].
+#define PSR_MODE_MASK 0x1F
+#define PSR_MODE_USR 0x10
+#define PSR_MODE_FIQ 0x11
+#define PSR_MODE_IRQ 0x12
+#define PSR_MODE_SVC 0x13
+#define PSR_MODE_ABT 0x17
+#define PSR_MODE_UND 0x1B
+#define PSR_MODE_SYS 0x1F // only available on ARM Arch. v4 and higher
+#define PSR_MODE_MON 0x16 // only available on ARM Arch. v6 and higher with TrustZone extension
+
+
+//==========================================================================
+// Compiler-independent abstraction macros.
+//==========================================================================
+
+/*
+ * Evaluates to true when the mode bits of the given CPSR value select USER
+ * mode.  The argument is fully parenthesized so the expansion stays correct
+ * for expression arguments, e.g. IS_USER_MODE(a | b).
+ */
+#define IS_USER_MODE(cpsr) (((cpsr) & PSR_MODE_MASK) == PSR_MODE_USR)
+
+//==========================================================================
+// Compiler-specific instruction abstraction macros.
+//==========================================================================
+
+#if defined(__arm__) && !defined(__thumb__) // ARM compiler compiling ARM code
+
+    #if (__GNUC__) // GCC inline assembly syntax
+
+    /*
+     * @brief Counts the leading zero bits of a 32-bit value using the ARM
+     *        CLZ instruction (available in ARM state on ARMv5 and later).
+     * @param x Value to scan.
+     * @returns Number of leading zeros, 0..32 (32 when x == 0).
+     */
+    static NV_INLINE NvU32
+    CountLeadingZeros(NvU32 x)
+    {
+        NvU32 count;
+        /* "\n\t" is the canonical statement separator in GNU assembler
+         * templates; the previous "\r\t" only assembled because gas happens
+         * to tolerate a stray carriage return. */
+        __asm__ __volatile__ ( \
+            "clz %0, %1 \n\t" \
+            :"=r"(count) \
+            :"r"(x));
+        return count;
+    }
+
+    /*
+     * @brief Macro to abstract retrieval of the current processor
+     * status register (CPSR) value.
+     * @param x is a variable of type NvU32 that will receive
+     * the CPSR value.
+     */
+    #define GET_CPSR(x) __asm__ __volatile__ ( \
+        "mrs %0, cpsr\n\t" \
+        : "=r"(x))
+
+    #else // assume RVDS compiler
+    /*
+     * @brief Macro to abstract retrieval of the current processor
+     * status register (CPSR) value.
+     * @param x is a variable of type NvU32 that will receive
+     * the CPSR value.
+     */
+    #define GET_CPSR(x) __asm { MRS x, CPSR } // x = CPSR
+
+    /*
+     * @brief Counts the leading zero bits of a 32-bit value using the ARM
+     *        CLZ instruction.
+     * @param x Value to scan.
+     * @returns Number of leading zeros, 0..32 (32 when x == 0).
+     */
+    static NV_INLINE NvU32
+    CountLeadingZeros(NvU32 x)
+    {
+        NvU32 count;
+        __asm { CLZ count, x }
+        return count;
+    }
+
+    #endif
+#else
+    /*
+     * @brief Macro to abstract retrieval of the current processor status register (CPSR) value.
+     * @param x is a variable of type NvU32 that will receive the CPSR value.
+     */
+    #define GET_CPSR(x) (x = PSR_MODE_USR) // Always assume USER mode for now
+
+    // If no built-in method for counting leading zeros do it the less
+    // efficient way: shift the value left one bit at a time until the MSB
+    // is set, counting the iterations.  Matches CLZ semantics (32 for 0).
+    static NV_INLINE NvU32
+    CountLeadingZeros(NvU32 x)
+    {
+        NvU32 i;
+
+        if (x)
+        {
+            i = 0;
+
+            do
+            {
+                if (x & 0x80000000)
+                {
+                    break;
+                }
+                x <<= 1;
+            } while (++i < 32);
+        }
+        else
+        {
+            i = 32;    // no bits set at all
+        }
+
+        return i;
+    }
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INCLUDED_NVRM_PROCESSOR_H
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.c
new file mode 100644
index 000000000000..fabd7121e24b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvrm_relocation_table.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_module.h"
+#include "nvrm_moduleids.h"
+#include "nvrm_hw_devids.h"
+
+#define NVRM_ENABLE_PRINTF 0 // Module debug: 0=disable, 1=enable
+
+#if (NV_DEBUG && NVRM_ENABLE_PRINTF)
+#define NVRM_MODULE_PRINTF(x) NvOsDebugPrintf x
+#else
+#define NVRM_MODULE_PRINTF(x)
+#endif
+
+// Relocation table unpacking macros.
+// Device entry info word layout (per the extraction below):
+//   [31:16] device id, [15:12] major revision, [11:8] minor revision,
+//   [7:4] power group.
+#define DEVICE_ID( i ) ( ( (i) & ( 0xFFFFUL << 16 ) ) >> 16 )
+#define DEVICE_MAJOR_REV( i ) ( ( (i) & ( 0xFUL << 12 ) ) >> 12 )
+#define DEVICE_MINOR_REV( i ) ( ( (i) & ( 0xFUL << 8 ) ) >> 8 )
+#define DEVICE_POWER_GROUP( i ) ( ( (i) & ( 0xFUL << 4 ) ) >> 4 )
+// IRQ entry word layout:
+//   [31] valid flag, [30:29] processor affinity, [28:20] interrupt
+//   controller device index, [15:8] owning device index,
+//   [7:0] controller-relative interrupt number.
+#define IRQ_VALID( i ) ( (i) >> 31 )
+#define IRQ_TARGET( i ) ( ( (i) & ( 0x3UL << 29 ) ) >> 29 )
+#define IRQ_INT_DEV_INDEX( i ) ( ( (i) & ( 0x1FFUL << 20 ) ) >> 20 )
+#define IRQ_DEVICE_INDEX( i ) ( ( (i) & ( 0xFFUL << 8 ) ) >> 8 )
+#define IRQ_INT_NUM(i) ( (i) & 0xFFul )
+
+// Storage for all parsed module instances; filled in by
+// NvRmPrivParseDevices and terminated by a zeroed entry.
+NvRmModuleInstance s_InstanceTable[NVRM_MAX_MODULE_INSTANCES];
+
+/**
+ * Maps relocation table device ids to software module ids.
+ * NVRM_DEVICE_UNKNOWN for unknown ids (will keep parsing table),
+ * or NVRM_DEVICE_ERROR if something bad happened
+ * (will stop parsing the table).
+ *
+ * NVRM_DEVICE_UNKNOWN can be used to cull the device list to save space by
+ * not allocating memory for devices that won't be used.
+ */
+NvU32
+NvRmPrivDevToModuleID(NvU32 devid)
+{
+    // Plain NvU32 return (not an enum) because the result may also be one
+    // of the NVRM_DEVICE_UNKNOWN / NVRM_DEVICE_ERROR sentinels described in
+    // the comment above.
+    switch( devid ) {
+    /* actual module with registers */
+    case NVRM_DEVID_AC97:
+        return NvRmModuleID_Ac97;
+    case NVRM_DEVID_APB_DMA:
+        return NvRmPrivModuleID_ApbDma;
+    case NVRM_DEVID_APB_DMA_CH:
+        return NvRmPrivModuleID_ApbDmaChannel;
+    case NVRM_DEVID_ARB_PRI:
+        return NvRmModuleID_ArbPriority;
+    case NVRM_DEVID_ARB_SEM:
+        return NvRmModuleID_ArbitrationSema;
+    case NVRM_DEVID_CAR:
+        return NvRmPrivModuleID_ClockAndReset;
+    case NVRM_DEVID_CC:
+        return NvRmPrivModuleID_CC;
+    case NVRM_DEVID_CMC:
+        return NvRmModuleID_CacheMemCtrl;
+    case NVRM_DEVID_BSEA:
+    case NVRM_DEVID_AVPBSEA:
+        /* Module name changed to NVRM_DEVID_AVPBSEA in AP20 */
+        return NvRmModuleID_BseA;
+    case NVRM_DEVID_VDE:
+        return NvRmModuleID_Vde;
+    case NVRM_DEVID_CPU_INTR:
+        return NvRmPrivModuleID_InterruptCpu;
+    case NVRM_DEVID_DISPLAY:
+        return NvRmModuleID_Display;
+    case NVRM_DEVID_DSI:
+        return NvRmModuleID_Dsi;
+    case NVRM_DEVID_DVC:
+        return NvRmModuleID_Dvc;
+    case NVRM_DEVID_EIDE:
+        return NvRmModuleID_Ide;
+    case NVRM_DEVID_EMC:
+        return NvRmPrivModuleID_ExternalMemoryController;
+    case NVRM_DEVID_EPP:
+        return NvRmModuleID_Epp;
+    case NVRM_DEVID_EVENT:
+        return NvRmModuleID_EventCtrl;
+    case NVRM_DEVID_FLOW:
+        return NvRmModuleID_FlowCtrl;
+    case NVRM_DEVID_FUSE:
+        return NvRmModuleID_Fuse;
+    case NVRM_DEVID_KFUSE:
+        return NvRmModuleID_KFuse;
+    case NVRM_DEVID_GPIO:
+        return NvRmPrivModuleID_Gpio;
+    case NVRM_DEVID_GR2D:
+        return NvRmModuleID_2D;
+    case NVRM_DEVID_GR3D:
+        return NvRmModuleID_3D;
+    case NVRM_DEVID_HDMI:
+        return NvRmModuleID_Hdmi;
+    case NVRM_DEVID_HOST1X:
+        return NvRmModuleID_GraphicsHost;
+    case NVRM_DEVID_HSMMC:
+        return NvRmModuleID_Hsmmc;
+    case NVRM_DEVID_I2C:
+        return NvRmModuleID_I2c;
+    case NVRM_DEVID_I2S:
+        return NvRmModuleID_I2s;
+    case NVRM_DEVID_ICTLR:
+        return NvRmPrivModuleID_Interrupt;
+    case NVRM_DEVID_ICTLR_ARBGNT:
+        return NvRmPrivModuleID_InterruptArbGnt;
+    case NVRM_DEVID_ICTLR_DRQ:
+        return NvRmPrivModuleID_InterruptDrq;
+    case NVRM_DEVID_ISP:
+        return NvRmModuleID_Isp;
+    case NVRM_DEVID_KBC:
+        return NvRmModuleID_Kbc;
+    case NVRM_DEVID_MC:
+        return NvRmPrivModuleID_MemoryController;
+    case NVRM_DEVID_MIPI_HS:
+        return NvRmModuleID_Mipi;
+    case NVRM_DEVID_MISC:
+        return NvRmModuleID_Misc;
+    case NVRM_DEVID_MPE:
+        return NvRmModuleID_Mpe;
+    case NVRM_DEVID_MSELECT:
+        return NvRmPrivModuleID_Mselect;
+    case NVRM_DEVID_NANDFLASH:
+        return NvRmModuleID_Nand;
+    case NVRM_DEVID_PMIF:
+        return NvRmModuleID_Pmif;
+    case NVRM_DEVID_PWFM:
+        return NvRmModuleID_Pwm;
+    case NVRM_DEVID_RTC:
+        return NvRmModuleID_Rtc;
+    case NVRM_DEVID_SDMMC:
+    case NVRM_DEVID_SDIO:
+        return NvRmModuleID_Sdio;
+    case NVRM_DEVID_SHR_SEM:
+        return NvRmModuleID_ResourceSema;
+
+    // Supporting only the slink controller for now, returning error for old
+    // slink controller.
+    case NVRM_DEVID_SLINK:
+        return NvRmModuleID_Slink;
+    case NVRM_DEVID_SPDIF:
+        return NvRmModuleID_Spdif;
+    case NVRM_DEVID_SPI:
+        return NvRmModuleID_Spi;
+    case NVRM_DEVID_STAT:
+        return NvRmModuleID_SysStatMonitor;
+    case NVRM_DEVID_SW_INTR:
+        return NvRmPrivModuleID_InterruptSw;
+    case NVRM_DEVID_TMR:
+        return NvRmModuleID_Timer;
+    case NVRM_DEVID_TMRUS:
+        return NvRmModuleID_TimerUs;
+    case NVRM_DEVID_TVO:
+        return NvRmModuleID_Tvo;
+    case NVRM_DEVID_TWC:
+        return NvRmModuleID_Twc;
+    case NVRM_DEVID_UART:
+        return NvRmModuleID_Uart;
+    case NVRM_DEVID_UCQ:
+        return NvRmModuleID_Ucq;
+    case NVRM_DEVID_AVPUCQ:
+        return NvRmModuleID_AvpUcq;
+    case NVRM_DEVID_USB:
+        return NvRmModuleID_Usb2Otg;
+    case NVRM_DEVID_VCP:
+        return NvRmModuleID_Vcp;
+    case NVRM_DEVID_VECTOR:
+        // FIXME: does this make sense?
+        return NvRmModuleID_ExceptionVector;
+    case NVRM_DEVID_VFIR:
+        return NvRmModuleID_Vfir;
+    case NVRM_DEVID_VI:
+        return NvRmModuleID_Vi;
+    case NVRM_DEVID_XIO:
+        return NvRmModuleID_Xio;
+    case NVRM_DEVID_UPTAG:
+        return NvRmPrivModuleID_ProcId;
+    case NVRM_DEVID_AHB_ARBC:
+        return NvRmPrivModuleID_Ahb_Arb_Ctrl;
+
+    /* memory (internal, external, etc - no registers) */
+    case NVRM_DEVID_EMEM:
+        return NvRmPrivModuleID_ExternalMemory;
+
+    case NVRM_DEVID_IMEM:
+        return NvRmPrivModuleID_InternalMemory;
+
+    case NVRM_DEVID_TCRAM:
+        return NvRmPrivModuleID_Tcram;
+
+    case NVRM_DEVID_IRAM:
+        return NvRmPrivModuleID_Iram;
+
+    case NVRM_DEVID_GART:
+        return NvRmPrivModuleID_Gart;
+
+    case NVRM_DEVID_EXIO:
+        return NvRmPrivModuleID_Mio_Exio;
+
+    case NVRM_DEVID_PMU_EXT:
+        return NvRmPrivModuleID_PmuExt;
+
+    case NVRM_DEVID_NOR:
+        return NvRmModuleID_Nor;
+
+    case NVRM_DEVID_CSI:
+        return NvRmModuleID_Csi;
+
+    case NVRM_DEVID_OWR:
+        return NvRmModuleID_OneWire;
+    case NVRM_DEVID_SNOR:
+        return NvRmModuleID_SyncNor;
+
+    case NVRM_DEVID_ARM_PERIPH:
+        return NvRmPrivModuleID_ArmPerif;
+
+    case NVRM_DEVID_ARM_ICTLR:
+        return NvRmPrivModuleID_ArmInterruptctrl;
+
+    case NVRM_DEVID_PCIE:
+        return NvRmPrivModuleID_Pcie;
+
+    case NVRM_DEVID_AHB_EMEM:
+        return NvRmPrivModuleID_AhbRemap;
+
+    case NVRM_DEVID_ARM_PL310:
+        return NvRmPrivModuleID_Pl310;
+
+    /* unknown or don't care */
+    default:
+        return NVRM_DEVICE_UNKNOWN;
+    }
+}
+
+/**
+ * Parses the device section of the relocation table into the instance
+ * array and module index table.
+ *
+ * @param table The raw relocation table (word 0 is the version).
+ * @param instances Receives a pointer to the first instance (into
+ *        s_InstanceTable); set to 0 on failure.
+ * @param instanceLast Receives a pointer to the zeroed terminator entry.
+ * @param modules Module index table to fill in; each entry's Index points
+ *        at the first instance of that module id.
+ *
+ * @return NvSuccess on success, NvError_BadParameter on a malformed table.
+ *
+ * Bug fix: the failure paths previously returned the initial value of
+ * "ret" (NvSuccess), so in release builds -- where NV_ASSERT compiles
+ * out -- a malformed table was reported as success with *instances == 0.
+ * Each failure path now sets an explicit error code.
+ */
+static NvError
+NvRmPrivParseDevices( const NvU32 *table,
+    NvRmModuleInstance **instances,
+    NvRmModuleInstance **instanceLast,
+    NvRmModule *modules )
+{
+    NvError ret = NvSuccess;
+    NvU32 info;
+    NvU32 devid;
+    NvS32 index;
+    NvU32 count;
+    NvU8 devidx;
+    NvRmModuleInstance *inst = 0;
+    NvU32 modid;
+    NvU32 start;
+    NvU32 length;
+    NvS32 tmp_index;
+    NvU32 tmp_devid;
+    NvU8 tmp_devidx;
+    NvBool skip;
+
+    /* The first 32 bits of the table is the table version number */
+    index = 1;
+
+    /* count the total number of devices and allocate space for them.
+     * for each device, check if the device has already been found (multiply
+     * instantiated), if this is the first device instance, then find
+     * all of the rest of the devices to compact all devices together.
+     *
+     * after the module instances have been compacted, count the number of
+     * unique non-memory device ids and setup the module index table.
+     *
+     * only count devices that the NvRmPrivDevToModuleID function returns a
+     * valid module id for (don't count memory or unknown devices). it is ok
+     * for NvRmPrivDevToModuleID to return unknown for devices it doesn't care
+     * about.
+     */
+    count = 0;
+    while( NV_READ32( &table[index] ) )
+    {
+        info = NV_READ32( &table[index] );
+        devid = DEVICE_ID( info );
+        modid = NvRmPrivDevToModuleID(devid);
+        if( modid != NVRM_DEVICE_UNKNOWN )
+        {
+            count++;
+        }
+
+        if( modid == NVRM_DEVICE_ERROR )
+        {
+            NV_ASSERT( !"relocation table parsing error" );
+            ret = NvError_BadParameter;  // was NvSuccess in release builds
+            goto fail;
+        }
+
+        index += 3;  // each device entry is 3 words: info, start, length
+    }
+
+    /* reset index to the first device */
+    index = 1;
+
+    /* Use Instance array */
+    inst = s_InstanceTable;
+    /* Make sure we are not over stepping the array boundaries */
+    NV_ASSERT(NVRM_MAX_MODULE_INSTANCES >= (count + 1));
+
+    *instances = inst;
+    devidx = (NvU8)-1; /* -1 is the invalid/unavailable indicator */
+
+    /* pass over the relocation table again to fill in the instance table */
+    while( NV_READ32( &table[index] ) )
+    {
+        skip = NV_FALSE;
+        info = NV_READ32( &table[index++] );
+        start = NV_READ32( &table[index++] );
+        length = NV_READ32( &table[index++] );
+        devidx++;
+
+        devid = DEVICE_ID( info );
+        modid = NvRmPrivDevToModuleID( devid );
+
+        if( modid == NVRM_DEVICE_UNKNOWN )
+        {
+            /* keep going */
+            NVRM_MODULE_PRINTF(( "[Unknwn] devidx: %d devid: %d start: 0x%x "
+                "length: 0x%x\n", devidx, devid, start, length ));
+            continue;
+        }
+        else if( modid == NVRM_DEVICE_ERROR )
+        {
+            NVRM_MODULE_PRINTF(( "[Error] devidx: %d devid: %d start: 0x%x "
+                "length: 0x%x\n", devidx, devid, start, length ));
+            NV_ASSERT( !"relocation table parsing failure" );
+            ret = NvError_BadParameter;  // was NvSuccess in release builds
+            goto fail;
+        }
+
+        /* search backwards to detect an already found instance
+         * (index already advanced past this entry, so the previous
+         * entry starts 6 words back) */
+        tmp_index = index - 6;
+        while( tmp_index > 1 )
+        {
+            tmp_devid = DEVICE_ID( NV_READ32( &table[tmp_index] ) );
+            if( tmp_devid == devid )
+            {
+                skip = NV_TRUE;
+                break;
+            }
+
+            tmp_index -= 3;
+        }
+
+        /* already found this instance, continue to the next device */
+        if( skip )
+        {
+            continue;
+        }
+
+        /* scan forward to find all instances of this devid, so all
+         * instances of one module end up contiguous in the array */
+        tmp_devid = devid;
+        tmp_index = index;
+        tmp_devidx = devidx;
+        for( ;; )
+        {
+            if( tmp_devid == devid )
+            {
+                inst->PhysAddr = start;
+                inst->Length = length;
+                inst->MajorVersion = (NvU8)DEVICE_MAJOR_REV(info);
+                inst->MinorVersion = (NvU8)DEVICE_MINOR_REV(info);
+                inst->DevPowerGroup = (NvU8)DEVICE_POWER_GROUP(info);
+                inst->VirtAddr = 0;
+                inst->DeviceId = devid;
+                inst->DevIdx = tmp_devidx;
+
+                NVRM_MODULE_PRINTF(( "[Device] devidx: %d devid: %d "
+                    "addr: 0x%x length: 0x%x major: %d minor: %d\n",
+                    tmp_devidx, devid, start, length,
+                    inst->MajorVersion, inst->MinorVersion ));
+
+                NV_ASSERT( tmp_devidx < (NvU8)-1 );
+                /* (NvU8)-1 is the indicator for invalid/unavailable
+                   instance and safeguard against overflow on idx too. */
+
+                inst++;
+            }
+
+            if( !NV_READ32( &table[tmp_index] ) )
+            {
+                break;
+            }
+
+            info = NV_READ32( &table[tmp_index++] );
+            start = NV_READ32( &table[tmp_index++] );
+            length = NV_READ32( &table[tmp_index++] );
+            tmp_devidx++;
+
+            tmp_devid = DEVICE_ID( info );
+        }
+    }
+
+    /* zero out the last instance (array terminator) */
+    NvOsMemset( inst, 0, sizeof(*inst) );
+    *instanceLast = inst;
+    inst = *instances;
+
+    /* setup the module index table:
+     * walk to instances - setup the module table.
+     */
+    index = 0;
+    devid = inst->DeviceId;
+    while( inst->DeviceId ) // null terminated instance array
+    {
+        if( devid == inst->DeviceId )
+        {
+            modid = NvRmPrivDevToModuleID(devid);
+            if(( modid != NVRM_DEVICE_UNKNOWN ) &&
+               ( modid != NVRM_DEVICE_ERROR ))
+            {
+                modules[modid].Index = (NvU16)index;
+            }
+            else
+            {
+                NV_ASSERT( !"relocation table parsing error" );
+            }
+        }
+
+        /* skip the rest of the instances */
+        do
+        {
+            inst++;
+            index++;
+        } while( inst->DeviceId == devid );
+
+        devid = inst->DeviceId;
+    }
+
+    return NvSuccess;
+
+fail:
+    *instances = 0;
+    return ret;
+}
+
+/**
+ * Finds the module instance whose relocation-table device index matches
+ * devidx.  The instance array is terminated by an entry whose DeviceId
+ * is zero.
+ *
+ * @param inst First entry of the null-terminated instance array.
+ * @param devidx Device index to look up.
+ *
+ * @return The matching instance, or 0 if no instance has that index.
+ */
+static NvRmModuleInstance *
+NvRmPrivGetInstance( NvRmModuleInstance *inst, NvU8 devidx )
+{
+    NvRmModuleInstance *p;
+
+    for( p = inst; p->DeviceId != 0; p++ )
+    {
+        if( p->DevIdx == devidx )
+        {
+            return p;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * Parses the IRQ section of the relocation table and fills in the per-device
+ * IRQ maps.  Works in two phases: first walks the device list to locate the
+ * interrupt controller instances (so controller-relative interrupt numbers
+ * can be translated to absolute IRQ numbers), then walks the IRQ entries
+ * that follow the null terminator of the device list.
+ *
+ * @param table The raw relocation table (word 0 is the version).
+ * @param irqs IRQ map pool; device maps are allocated from DeviceIrq[].
+ * @param instances Null-terminated instance array from NvRmPrivParseDevices.
+ *
+ * @return NvSuccess (always; unknown/foreign entries are skipped).
+ */
+static NvError
+NvRmPrivParseIrqs( const NvU32 *table, NvRmIrqMap *irqs,
+    NvRmModuleInstance *instances )
+{
+    NvU32 info;
+    NvU32 minor;
+    NvU32 index;
+    NvU16 ctlr_index[NVRM_MAX_INTERRUPT_CTLRS];
+    NvU16 ctlr;
+    NvU16 irq;
+    NvU8 devidx = 0;
+    NvU16 intridx = 0;
+    NvRmModuleInstance *inst;
+    NvRmModuleIrqMap *map;
+    NvU8 Valid;
+    NvU16 IntDevIndex;
+    NvU16 IntNum;
+    NvU32 devid;
+    NvU32 Affinity;
+    // Affinity encoding used below: 0 = no affinity, 1 = CPU, 2 = AVP.
+    NvU32 Processor = NV_IS_AVP ? 2 : 1;
+    NvU8 irqBase = 0;
+
+    // 0xFFFF marks a controller slot as unused.
+    for (ctlr = 0; ctlr < NVRM_MAX_INTERRUPT_CTLRS; ++ctlr)
+    {
+        ctlr_index[ctlr] = 0xFFFF;
+    }
+
+    /* skip version */
+    index = 1;
+
+    /* find the interrupt controllers */
+    while( NV_READ32( &table[index] ) )
+    {
+        info = NV_READ32( &table[index] );
+        devid = DEVICE_ID( info );
+
+        // Main interrupt controller?
+        if (devid == NVRM_DEVID_ICTLR)
+        {
+            // The main interrupt controller instances are identified
+            // by their minor revision number.
+            minor = DEVICE_MINOR_REV(info);
+            NV_ASSERT(minor < NVRM_MAX_MAIN_INTR_CTLRS);
+            NV_ASSERT(ctlr_index[minor] == 0xFFFF);
+            ctlr_index[minor] = devidx;
+        }
+        else if (devid == NVRM_DEVID_ARM_ICTLR)
+        {
+            /* If the falcon interrupt controller is present then the IRQs
+             * start from 32. Falcon controller cannot be used when running on
+             * QT using
+             * EMUTRANS.
+             */
+#if !(NVCPU_IS_X86 || NV_IS_AVP)
+            irqBase = 32;
+#endif
+        }
+
+        index += 3;  // each device entry is 3 words: info, start, length
+        devidx++;
+    }
+
+    /* skip the null separator between the device and irq table */
+    index++;
+
+    /* IRQ entries are one word each, null terminated */
+    while( NV_READ32( &table[index] ) )
+    {
+        info = NV_READ32( &table[index++] );
+
+        // Extract the interrupt entry fields.
+        Valid = (NvU8)IRQ_VALID(info);
+        IntDevIndex = (NvU16)IRQ_INT_DEV_INDEX(info);
+        devidx = (NvU8)IRQ_DEVICE_INDEX(info);
+        IntNum = (NvU16)IRQ_INT_NUM(info);
+        Affinity = IRQ_TARGET(info);
+
+        NV_ASSERT(IntNum < NVRM_IRQS_PER_INTR_CTLR);
+
+        // Retrieve the device instance to which this interrupt belongs.
+        inst = NvRmPrivGetInstance( instances, devidx );
+        if( inst == NULL )
+        {
+            /* interrupt pointing to something that's unknown, skip it. */
+            continue;
+        }
+
+        // Locate the interrupt controller that manages this interrupt;
+        // absolute IRQ = irqBase + 32 per controller + interrupt number.
+        irq = NVRM_IRQ_INVALID;
+        for( ctlr = 0; ctlr < NVRM_MAX_INTERRUPT_CTLRS; ctlr++ )
+        {
+            if (ctlr_index[ctlr] != 0xFFFF)
+            {
+                if( ctlr_index[ctlr] == IntDevIndex )
+                {
+                    irq = irqBase + ( ctlr * 32 ) + IntNum;
+                    break;
+                }
+            }
+        }
+
+        /* Don't take care of interrupts routed to interrupts other than main
+         * interrupt controller.
+         * */
+        if (irq == NVRM_IRQ_INVALID) continue;
+
+        map = inst->IrqMap;
+        if( map == 0 )
+        {
+            // Allocate a new device IRQ map.
+            // Assumes irqs->DeviceIrq[] entries arrive zero-initialized
+            // (IrqCount == 0) -- TODO(review): confirm against caller.
+            NV_ASSERT(irqs->DeviceCount < NVRM_MAX_IRQ_DEVICES);
+            map = &irqs->DeviceIrq[ irqs->DeviceCount++ ];
+            inst->IrqMap = map;
+        }
+        else
+        {
+            NV_ASSERT(map->IrqCount < NVRM_MAX_DEVICE_IRQS);
+        }
+
+        if (Valid)
+        {
+            /* HW bug 562244 - Affinities are wrong in the relocation table. */
+            if (irq == 29 + irqBase)
+            {
+                Affinity = 1;
+            }
+            if (irq == 28 + irqBase)
+            {
+                Affinity = 2;
+            }
+
+
+            // Consider this IRQ for mapping only if the IRQ's processor
+            // affinity matches this processor or if the IRQ has no affinity.
+            if ((Affinity == 0)
+             || (Affinity == Processor))
+            {
+                // Add the IRQ to the device's IRQ list
+                map->Irq[ map->IrqCount++ ] = irq;
+                NVRM_MODULE_PRINTF(("[Interrupt %d] Device Index: %d "
+                    "IntCtlr: %d IntNum: %d Irq: %d Affinity: %d\n",
+                    intridx, devidx, ctlr, IntNum, irq, Affinity));
+            }
+            else
+            {
+                // This IRQ belongs to the other processor.
+                NVRM_MODULE_PRINTF(("[Interrupt %d] Device Index: %d "
+                    "IntCtlr: %d IntNum: %d Irq: %d Affinity: %d mapped on "
+                    "other processor\n",
+                    intridx, devidx, ctlr, IntNum, irq, Affinity));
+            }
+        }
+        else
+        {
+            // Add placeholder to the device's IRQ list
+            map->Irq[ map->IrqCount++ ] = NVRM_IRQ_INVALID;
+        }
+        intridx++;
+    }
+
+    NVRM_MODULE_PRINTF(("\n"));
+
+    return NvSuccess;
+}
+
+/**
+ * Top-level relocation table parser: fills in the instance array, the
+ * module index table, and the per-device IRQ maps.
+ *
+ * @param table The raw relocation table (word 0 must be version 1).
+ * @param instances Receives the first module instance.
+ * @param instanceLast Receives the terminator instance.
+ * @param modules Module index table to fill in.
+ * @param irqs IRQ map pool to fill in.
+ *
+ * @return NvSuccess, or the first error reported by either parsing pass.
+ */
+NvError
+NvRmPrivRelocationTableParse(
+    const NvU32 *table,
+    NvRmModuleInstance **instances, NvRmModuleInstance **instanceLast,
+    NvRmModule *modules, NvRmIrqMap *irqs )
+{
+    NvError err;
+
+    /* only know how to parse version 1 tables */
+    NV_ASSERT( NV_READ32( &table[0] ) == 1 );
+
+    NVRM_MODULE_PRINTF(( "Relocation Table:\n" ));
+
+    /* devices first: the IRQ pass needs the parsed instance array */
+    err = NvRmPrivParseDevices( table, instances, instanceLast, modules );
+    if( err == NvSuccess )
+    {
+        err = NvRmPrivParseIrqs( table, irqs, *instances );
+    }
+
+    return err;
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.h
new file mode 100644
index 000000000000..c0eb4707d676
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_relocation_table.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_RELOCATION_TABLE_H
+#define INCLUDED_NVRM_RELOCATION_TABLE_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+
+/**
+ * The AP family supports a Relocation Table which lists the devices in the
+ * system, their version numbers, and their physical base addresses and
+ * aperture size. Interrupt information is also stored in the table.
+ *
+ * The relocation table format:
+ *
+ * +-------------------( 32 bits )-------------------------------------+
+ * | table version |
+ * +-------------------------------------------------------------------+
+ * | [ device table entries ] |
+ * +-------------------------------------------------------------------+
+ * | null (0) |
+ * +-------------------------------------------------------------------+
+ * | [ irq table entries ] |
+ * +-------------------------------------------------------------------+
+ * | null (0) |
+ * +-------------------------------------------------------------------+
+ *
+ * The device table entry format:
+ *
+ * +-------------------( 32 bits )-------------------------------------+
+ * | id [31:16] | major [15:12] | minor [11:8] | res [7:4] | bar [3:0] |
+ * |-------------------------------------------------------------------|
+ * | start address |
+ * |-------------------------------------------------------------------|
+ * | length |
+ * +-------------------------------------------------------------------+
+ *
+ * The irq entry format:
+ *
+ * +-------------------( 32 bits )-----------------------------------------+
+ * |V[31]|rsvd[30:29]|IntDevIdx[28:20]|rsvd[19:17]|DevIdx[16:8]|IntNum[7:0]|
+ * +-----------------------------------------------------------------------+
+ *
+ * Every entry (whether valid or not) will always contain an Interrupt
+ * Controller Device Index (IntDevIdx), a Device Index (DevIdx), and an
+ * Interrupt Number (IntNum) value. Whether or not that entry actually
+ * corresponds to an interrupt source is determined by the valid (V) bit.
+ * If the valid bit is 1, the interrupt number corresponds to an actual
+ * interrupt source. If the valid bit is zero, this entry represents an
+ * interrupt source that was present in a prior SOC but that is no longer
+ * used. The slot for that interrupt in the interrupt map table must be
+ * preserved because "indexed" interrupts are determined positionally.
+ * Removal of an interrupt would change the positional assignment of all
+ * following interrupt numbers and would break forward compatibility.
+ */
+
+#define NVRM_DEVICE_UNKNOWN ((NvU32)-2)
+#define NVRM_DEVICE_ERROR ((NvU32)-3)
+
+// The module index in the NvRmModule table is invalid; this is not an error.
+#define NVRM_MODULE_INVALID (0xFFFF)
+
+// Number of interrupt controllers
+#define NVRM_MAX_MAIN_INTR_CTLRS 5
+
+// Number of DMA transmit interrupt controllers
+#define NVRM_MAX_DRQ_INTR_CTLRS 2
+
+// Number of Arbitration Grant interrupt controllers
+#define NVRM_ARB_GNT_INTR_CTLRS 1
+
+// Number of interrupt controllers of all types
+#define NVRM_MAX_INTERRUPT_CTLRS (NVRM_MAX_MAIN_INTR_CTLRS + \
+ NVRM_MAX_DRQ_INTR_CTLRS + NVRM_ARB_GNT_INTR_CTLRS)
+
+// Relative position of first DMA transmit interrupt controller
+#define NVRM_FIRST_DRQ_INTR_CTLR (NVRM_MAX_MAIN_INTR_CTLRS)
+
+// Relative position of first Arbitration Grant interrupt controller
+#define NVRM_FIRST_ARB_INTR_CTLR (NVRM_MAX_MAIN_INTR_CTLRS + \
+ NVRM_MAX_DRQ_INTR_CTLRS)
+
+// Number of IRQs per interrupt controller (main, DRQ, & ARB)
+#define NVRM_IRQS_PER_INTR_CTLR 32
+
+// Number of IRQs per GPIO controller
+#define NVRM_IRQS_PER_GPIO_CTLR 32
+
+// Number of IRQs per AHB DMA channel
+#define NVRM_IRQS_PER_AHB_DMA_CHAN 1
+
+// Number of IRQs per APB DMA channel
+#define NVRM_IRQS_PER_APB_DMA_CHAN 1
+
+// Invalid IRQ value
+#define NVRM_IRQ_INVALID 0xFFFF
+
+// Maximum number of interrupts per device
+#define NVRM_MAX_DEVICE_IRQS 8
+
+// Maximum number of IRQs
+#define NVRM_MAX_IRQS 500
+
+// Maximum number of devices that can generate IRQs
+// !!!CHECKME!!! CHECK THE SIZING OF THIS VALUE
+#define NVRM_MAX_IRQ_DEVICES 96
+
+// Maximum number of DMA channels
+#define NVRM_MAX_DMA_CHANNELS 32
+
+// This is the Maximum number of instance of all modules on any chip
+// supported by Rm.
+// Need to increase this value when more modules are added in the upcoming
+// chips.
+#define NVRM_MAX_MODULE_INSTANCES 256
+
+/**
+ * Device IRQ assignments structure.
+ */
+typedef struct NvRmModuleIrqMapRec
+{
+ /* Number of IRQs owned by this device */
+ NvU16 IrqCount;
+
+ /* Maximum instance IRQ index */
+ NvU16 IndexMax;
+
+ /* Base IRQ for subcontroller "index" IRQ fanout */
+ NvU16 IndexBase;
+
+ /* IRQs owned by this device */
+ NvU16 Irq[NVRM_MAX_DEVICE_IRQS];
+} NvRmModuleIrqMap;
+
+/**
+ * System IRQ assignments structure.
+ */
+typedef struct NvRmIrqMapRec
+{
+ /* Number of devices owning IRQs */
+ NvU32 DeviceCount;
+
+ /* Device IRQ mapping */
+ NvRmModuleIrqMap DeviceIrq[NVRM_MAX_IRQ_DEVICES];
+} NvRmIrqMap;
+
+/**
+ * Some hardware modules may be instantiated multiple times - all hw modules
+ * are mapped into this structure.
+ */
+typedef struct NvRmModuleInstanceRec
+{
+ /* the base address of the module instance */
+ NvRmPhysAddr PhysAddr;
+
+ /* length of the aperture */
+ NvU32 Length;
+
+ /* hardware version */
+ NvU8 MajorVersion;
+ NvU8 MinorVersion;
+
+ /* power group */
+ NvU8 DevPowerGroup;
+
+ /* the original index into the relocation table */
+ NvU8 DevIdx;
+
+ /* hardware device id */
+ NvU32 DeviceId;
+
+ /* Irq mapping for this module instance */
+ NvRmModuleIrqMap *IrqMap;
+
+ /* virtual address: will be mapped by a later mechanism. this is here
+ * as a space optimization.
+ */
+ void *VirtAddr;
+
+ /* Module specific data like clocks, resets etc.. */
+ void *ModuleData;
+} NvRmModuleInstance;
+
+/**
+ * Module index table. Each index points to the first module instance in the
+ * NvRmModuleInstance table. The NvRmModule table itself is indexed by module
+ * id.
+ */
+typedef struct NvRmModuleRec
+{
+ /* offset into the NvRmModuleInstance table */
+ NvU16 Index;
+} NvRmModule;
+
+/**
+ * Maps relocation table device ids to software module ids.
+ * NVRM_DEVICE_UNKNOWN for unknown ids (will keep parsing table),
+ * or NVRM_DEVICE_ERROR if something bad happened
+ * (will stop parsing the table).
+ *
+ * NVRM_DEVICE_UNKNOWN can be used to cull the device list to save space by
+ * not allocating memory for devices that won't be used.
+ */
+NvU32 NvRmPrivDevToModuleID(NvU32 devid);
+
+/**
+ * Parse the relocation table.
+ *
+ * The module instance table (NvRmModuleInstance) will be allocated to exactly
+ * match the number of hardware modules in the system rather than using a
+ * worst-case number of instances for all hardware modules.
+ *
+ * The module table should be allocated prior to this function and should be
+ * sized to the maximum number of module ids.
+ *
+ * The irq map will not be allocated (statically sized).
+ *
+ * The instance array will be null terminated -- the last instance will contain
+ * zero in all of its fields.
+ *
+ * @param hDevice The resource manager instance
+ * @param table The relocation table
+ * @param instances Out param - will contain the allocated instance table
+ * @param instanceLast Out param - will contain the last allocated instance + 1
+ * @param modules Out param - will contain the allocated module table
+ * @param irqs The irq table - will be filled in by the parser
+ */
+NvError
+NvRmPrivRelocationTableParse(
+ const NvU32 *table,
+ NvRmModuleInstance **instances,
+ NvRmModuleInstance **instanceLast,
+ NvRmModule *modules,
+ NvRmIrqMap *irqs );
+
+#endif
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_rmctrace.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_rmctrace.c
new file mode 100644
index 000000000000..cd5678d6f1ba
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_rmctrace.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_rmctrace.h"
+#include "nvos.h"
+#include "nvassert.h"
+
+/*
+ * Stub: RMC trace file output is not available in this build, so opening
+ * a trace file always fails with NvError_NotSupported.
+ */
+NvError NvRmRmcOpen( const char *name, NvRmRmcFile *rmc )
+{
+    return NvError_NotSupported;
+}
+
+/* Stub: nothing to release since NvRmRmcOpen never succeeds here. */
+void NvRmRmcClose( NvRmRmcFile *rmc )
+{
+}
+
+/* Stub: trace output is discarded; the format string and arguments are ignored. */
+void NvRmRmcTrace( NvRmRmcFile *rmc, const char *format, ... )
+{
+}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_structure.h b/arch/arm/mach-tegra/nvrm/core/common/nvrm_structure.h
new file mode 100644
index 000000000000..da3f68423c02
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_structure.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_STRUCTURE_H
+#define INCLUDED_NVRM_STRUCTURE_H
+
+/*
+ * nvrm_structure.h defines all of the internal data structures for the
+ * resource manager which are chip independent.
+ *
+ * Don't add chip specific stuff to this file.
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_module_private.h"
+#include "nvrm_chipid.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_rmctrace.h"
+#include "nvrm_configuration.h"
+#include "nvrm_relocation_table.h"
+#include "nvrm_moduleids.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+typedef struct RmConfigurationVariables_t
+{
+ /* RMC Trace file name */
+ char RMCTraceFileName[ NVRM_CFG_MAXLEN ];
+
+ /* chiplib name */
+ char Chiplib[ NVRM_CFG_MAXLEN ];
+
+ /* chiplib args */
+ char ChiplibArgs[ NVRM_CFG_MAXLEN ];
+
+} RmConfigurationVariables;
+
+/* memory pool information */
+typedef struct RmMemoryPool_t
+{
+ NvU32 base;
+ NvU32 size;
+} RmMemoryPool;
+
+/* The state for the resource manager */
+typedef struct NvRmDeviceRec
+{
+ RmConfigurationVariables cfg;
+ NvRmRmcFile rmc;
+ NvBool rmc_enable;
+ NvOsMutexHandle mutex;
+ // FIXME: this is hardcoded to the number of tristate registers in AP15.
+ NvS16 TristateRefCount[4 * sizeof(NvU32)*8];
+ NvU32 refcount;
+
+ NvOsMutexHandle MemMgrMutex;
+ NvOsMutexHandle PinMuxMutex;
+ NvOsMutexHandle CarMutex; /* r-m-w top level CAR registers mutex */
+
+ /* chip id */
+ NvRmChipId ChipId;
+
+ /* module instances and module index table */
+ NvRmModuleTable ModuleTable;
+
+ RmMemoryPool ExtMemoryInfo;
+ RmMemoryPool IramMemoryInfo;
+ RmMemoryPool GartMemoryInfo;
+
+ NvU16 MaxIrqs;
+
+ const NvU32 ***PinMuxTable;
+ // FIXME: get rid of all the various Init and Open functions in favor
+ // of a sane state machine for system boot/initialization
+ NvBool bPreInit;
+ NvBool bBasicInit;
+} NvRmDevice;
+
+// FIXME: This macro should be coming from the relocation table.
+#define NVRM_MAX_INSTANCES 32
+
+/**
+ * Sub-controller interrupt decoder description forward reference.
+ */
+typedef struct NvRmIntrDecoderRec *NvRmIntrDecoderHandle;
+
+/**
+ * Attributes of the Interrupt sub-decoders.
+ */
+typedef struct NvRmIntrDecoderRec
+{
+ NvRmModuleID ModuleID;
+
+ // Number of IRQs owned by this sub-controller.
+ // This value is same for all the instances of the controller.
+ NvU32 SubIrqCount;
+
+ // Number of instance for this sub-decoder
+ NvU32 NumberOfInstances;
+
+ // Main controller IRQ.
+ NvU16 MainIrq[NVRM_MAX_INSTANCES];
+
+ // First IRQ owned by this sub-controller.
+ NvU16 SubIrqFirst[NVRM_MAX_INSTANCES];
+
+ // Last IRQ owned by this sub-controller.
+ NvU16 SubIrqLast[NVRM_MAX_INSTANCES];
+
+} NvRmIntrDecoder;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // INCLUDED_NVRM_STRUCTURE_H
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/Makefile b/arch/arm/mach-tegra/nvrm/dispatch/Makefile
new file mode 100644
index 000000000000..959155944f47
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/Makefile
@@ -0,0 +1,28 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += NvRm_Dispatch.o
+obj-y += nvrm_analog_dispatch.o
+obj-y += nvrm_diag_dispatch.o
+obj-y += nvrm_dma_dispatch.o
+obj-y += nvrm_gpio_dispatch.o
+obj-y += nvrm_i2c_dispatch.o
+obj-y += nvrm_owr_dispatch.o
+obj-y += nvrm_pwm_dispatch.o
+obj-y += nvrm_init_dispatch.o
+obj-y += nvrm_interrupt_dispatch.o
+obj-y += nvrm_memmgr_dispatch.o
+obj-y += nvrm_module_dispatch.o
+obj-y += nvrm_pinmux_dispatch.o
+obj-y += nvrm_power_dispatch.o
+obj-y += nvrm_spi_dispatch.o
+obj-y += nvrm_pmu_dispatch.o
+obj-y += nvrm_keylist_dispatch.o
+obj-y += nvrm_pcie_dispatch.o
+obj-y += nvrm_memctrl_dispatch.o
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/NvRm_Dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/NvRm_Dispatch.c
new file mode 100644
index 000000000000..e8ea4ee5be9e
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/NvRm_Dispatch.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvidlcmd.h"
+#include "nvreftrack.h"
+#include "nvrm_memctrl.h"
+#include "nvrm_pcie.h"
+#include "nvrm_pwm.h"
+#include "nvrm_keylist.h"
+#include "nvrm_pmu.h"
+#include "nvrm_diag.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_analog.h"
+#include "nvrm_owr.h"
+#include "nvrm_i2c.h"
+#include "nvrm_spi.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_dma.h"
+#include "nvrm_power.h"
+#include "nvrm_gpio.h"
+#include "nvrm_module.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_init.h"
+NvError nvrm_memctrl_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pcie_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pwm_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_keylist_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pmu_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_diag_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pinmux_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_analog_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_owr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_i2c_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_spi_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_interrupt_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_dma_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_power_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_gpio_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_module_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_memmgr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_init_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+
+// NvRm Package
+typedef enum
+{
+ NvRm_Invalid = 0,
+ NvRm_nvrm_memctrl,
+ NvRm_nvrm_pcie,
+ NvRm_nvrm_pwm,
+ NvRm_nvrm_keylist,
+ NvRm_nvrm_pmu,
+ NvRm_nvrm_diag,
+ NvRm_nvrm_pinmux,
+ NvRm_nvrm_analog,
+ NvRm_nvrm_owr,
+ NvRm_nvrm_i2c,
+ NvRm_nvrm_spi,
+ NvRm_nvrm_interrupt,
+ NvRm_nvrm_dma,
+ NvRm_nvrm_power,
+ NvRm_nvrm_gpio,
+ NvRm_nvrm_module,
+ NvRm_nvrm_memmgr,
+ NvRm_nvrm_init,
+ NvRm_Num,
+ NvRm_Force32 = 0x7FFFFFFF,
+} NvRm;
+
+typedef NvError (* NvIdlDispatchFunc)( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+
+typedef struct NvIdlDispatchTableRec
+{
+ NvU32 PackageId;
+ NvIdlDispatchFunc DispFunc;
+} NvIdlDispatchTable;
+
+static NvIdlDispatchTable gs_DispatchTable[] =
+{
+ { NvRm_nvrm_memctrl, nvrm_memctrl_Dispatch },
+ { NvRm_nvrm_pcie, nvrm_pcie_Dispatch },
+ { NvRm_nvrm_pwm, nvrm_pwm_Dispatch },
+ { NvRm_nvrm_keylist, nvrm_keylist_Dispatch },
+ { NvRm_nvrm_pmu, nvrm_pmu_Dispatch },
+ { NvRm_nvrm_diag, nvrm_diag_Dispatch },
+ { NvRm_nvrm_pinmux, nvrm_pinmux_Dispatch },
+ { NvRm_nvrm_analog, nvrm_analog_Dispatch },
+ { NvRm_nvrm_owr, nvrm_owr_Dispatch },
+ { NvRm_nvrm_i2c, nvrm_i2c_Dispatch },
+ { NvRm_nvrm_spi, nvrm_spi_Dispatch },
+ { NvRm_nvrm_interrupt, nvrm_interrupt_Dispatch },
+ { NvRm_nvrm_dma, nvrm_dma_Dispatch },
+ { NvRm_nvrm_power, nvrm_power_Dispatch },
+ { NvRm_nvrm_gpio, nvrm_gpio_Dispatch },
+ { NvRm_nvrm_module, nvrm_module_Dispatch },
+ { NvRm_nvrm_memmgr, nvrm_memmgr_Dispatch },
+ { NvRm_nvrm_init, nvrm_init_Dispatch },
+ { 0 },
+};
+
+/**
+ * Top-level NvRm IDL dispatcher.
+ *
+ * The first two NvU32 words of InBuffer are the package id and the
+ * function id (callers must supply at least 8 bytes).  The package id is
+ * 1-based (NvRm_Invalid == 0), so entry packid_-1 of gs_DispatchTable is
+ * the per-package dispatcher; the selected function id and the raw
+ * buffers are forwarded to it.
+ *
+ * Returns NvError_IoctlFailed for an out-of-range or unpopulated package
+ * id, otherwise whatever the package dispatcher returns.
+ */
+NvError NvRm_Dispatch( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvU32 packid_;
+    NvU32 funcid_;
+    NvIdlDispatchTable *table_;
+
+    NV_ASSERT( InBuffer );
+    NV_ASSERT( OutBuffer );
+
+    packid_ = ((NvU32 *)InBuffer)[0];
+    funcid_ = ((NvU32 *)InBuffer)[1];
+    table_ = gs_DispatchTable;
+
+    /* packid_ is unsigned, so packid_ == 0 wraps to 0xFFFFFFFF and is
+     * rejected by the range check along with too-large ids. */
+    if ( packid_-1 >= NV_ARRAY_SIZE(gs_DispatchTable) ||
+         !table_[packid_ - 1].DispFunc )
+        return NvError_IoctlFailed;
+
+    return table_[packid_ - 1].DispFunc( funcid_, InBuffer, InSize,
+        OutBuffer, OutSize, Ctx );
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_analog_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_analog_dispatch.c
new file mode 100644
index 000000000000..a94b79914907
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_analog_dispatch.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_analog.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+typedef struct NvRmUsbDetectChargerState_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 wait;
+} NV_ALIGN(4) NvRmUsbDetectChargerState_in;
+
+typedef struct NvRmUsbDetectChargerState_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmUsbDetectChargerState_inout;
+
+typedef struct NvRmUsbDetectChargerState_out_t
+{
+ NvU32 ret_;
+} NV_ALIGN(4) NvRmUsbDetectChargerState_out;
+
+typedef struct NvRmUsbDetectChargerState_params_t
+{
+ NvRmUsbDetectChargerState_in in;
+ NvRmUsbDetectChargerState_inout inout;
+ NvRmUsbDetectChargerState_out out;
+} NvRmUsbDetectChargerState_params;
+
+typedef struct NvRmUsbIsConnected_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+} NV_ALIGN(4) NvRmUsbIsConnected_in;
+
+typedef struct NvRmUsbIsConnected_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmUsbIsConnected_inout;
+
+typedef struct NvRmUsbIsConnected_out_t
+{
+ NvBool ret_;
+} NV_ALIGN(4) NvRmUsbIsConnected_out;
+
+typedef struct NvRmUsbIsConnected_params_t
+{
+ NvRmUsbIsConnected_in in;
+ NvRmUsbIsConnected_inout inout;
+ NvRmUsbIsConnected_out out;
+} NvRmUsbIsConnected_params;
+
+typedef struct NvRmAnalogGetTvDacConfiguration_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmAnalogTvDacType Type;
+} NV_ALIGN(4) NvRmAnalogGetTvDacConfiguration_in;
+
+typedef struct NvRmAnalogGetTvDacConfiguration_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmAnalogGetTvDacConfiguration_inout;
+
+typedef struct NvRmAnalogGetTvDacConfiguration_out_t
+{
+ NvU8 ret_;
+} NV_ALIGN(4) NvRmAnalogGetTvDacConfiguration_out;
+
+typedef struct NvRmAnalogGetTvDacConfiguration_params_t
+{
+ NvRmAnalogGetTvDacConfiguration_in in;
+ NvRmAnalogGetTvDacConfiguration_inout inout;
+ NvRmAnalogGetTvDacConfiguration_out out;
+} NvRmAnalogGetTvDacConfiguration_params;
+
+typedef struct NvRmAnalogInterfaceControl_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmAnalogInterface Interface;
+ NvBool Enable;
+ void* Config;
+ NvU32 ConfigLength;
+} NV_ALIGN(4) NvRmAnalogInterfaceControl_in;
+
+typedef struct NvRmAnalogInterfaceControl_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmAnalogInterfaceControl_inout;
+
+typedef struct NvRmAnalogInterfaceControl_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmAnalogInterfaceControl_out;
+
+typedef struct NvRmAnalogInterfaceControl_params_t
+{
+ NvRmAnalogInterfaceControl_in in;
+ NvRmAnalogInterfaceControl_inout inout;
+ NvRmAnalogInterfaceControl_out out;
+} NvRmAnalogInterfaceControl_params;
+
+/*
+ * Unmarshal the NvRmUsbDetectChargerState IDL call: read the handle and
+ * wait argument from InBuffer, invoke the RM API, and store the return
+ * value into the 'out' section of the caller's output buffer.  OutBuffer
+ * points at the 'inout' section, so the out pointer is computed by adding
+ * the offset delta between the two sections.
+ */
+static NvError NvRmUsbDetectChargerState_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmUsbDetectChargerState_in *p_in;
+    NvRmUsbDetectChargerState_out *p_out;
+
+    p_in = (NvRmUsbDetectChargerState_in *)InBuffer;
+    p_out = (NvRmUsbDetectChargerState_out *)((NvU8 *)OutBuffer + OFFSET(NvRmUsbDetectChargerState_params, out) - OFFSET(NvRmUsbDetectChargerState_params, inout));
+
+
+    p_out->ret_ = NvRmUsbDetectChargerState( p_in->hDevice, p_in->wait );
+
+    return err_;
+}
+
+/*
+ * Unmarshal the NvRmUsbIsConnected IDL call: pull the device handle from
+ * InBuffer, call the RM API, and write the NvBool result into the 'out'
+ * section of the caller's output buffer (OutBuffer points at 'inout').
+ */
+static NvError NvRmUsbIsConnected_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmUsbIsConnected_in *p_in;
+    NvRmUsbIsConnected_out *p_out;
+
+    p_in = (NvRmUsbIsConnected_in *)InBuffer;
+    p_out = (NvRmUsbIsConnected_out *)((NvU8 *)OutBuffer + OFFSET(NvRmUsbIsConnected_params, out) - OFFSET(NvRmUsbIsConnected_params, inout));
+
+
+    p_out->ret_ = NvRmUsbIsConnected( p_in->hDevice );
+
+    return err_;
+}
+
+/*
+ * Unmarshal the NvRmAnalogGetTvDacConfiguration IDL call: read the device
+ * handle and DAC type from InBuffer, call the RM API, and store the NvU8
+ * result in the 'out' section of the caller's output buffer.
+ */
+static NvError NvRmAnalogGetTvDacConfiguration_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmAnalogGetTvDacConfiguration_in *p_in;
+    NvRmAnalogGetTvDacConfiguration_out *p_out;
+
+    p_in = (NvRmAnalogGetTvDacConfiguration_in *)InBuffer;
+    p_out = (NvRmAnalogGetTvDacConfiguration_out *)((NvU8 *)OutBuffer + OFFSET(NvRmAnalogGetTvDacConfiguration_params, out) - OFFSET(NvRmAnalogGetTvDacConfiguration_params, inout));
+
+
+    p_out->ret_ = NvRmAnalogGetTvDacConfiguration( p_in->hDevice, p_in->Type );
+
+    return err_;
+}
+
+/*
+ * Unmarshal the NvRmAnalogInterfaceControl IDL call.
+ *
+ * The caller's optional Config blob (p_in->Config / p_in->ConfigLength)
+ * lives in caller address space, so a kernel-side copy is allocated and
+ * copied in before the RM call and copied back out afterwards (the RM API
+ * may update it in place).  Copy failures are reported as
+ * NvError_BadParameter; the local buffer is always freed on exit.
+ */
+static NvError NvRmAnalogInterfaceControl_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmAnalogInterfaceControl_in *p_in;
+    NvRmAnalogInterfaceControl_out *p_out;
+    void* Config = NULL;
+
+    p_in = (NvRmAnalogInterfaceControl_in *)InBuffer;
+    p_out = (NvRmAnalogInterfaceControl_out *)((NvU8 *)OutBuffer + OFFSET(NvRmAnalogInterfaceControl_params, out) - OFFSET(NvRmAnalogInterfaceControl_params, inout));
+
+    if( p_in->ConfigLength && p_in->Config )
+    {
+        Config = (void* )NvOsAlloc( p_in->ConfigLength );
+        if( !Config )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* NOTE(review): this inner check is redundant -- the enclosing
+         * condition already guarantees p_in->Config is non-NULL. */
+        if( p_in->Config )
+        {
+            err_ = NvOsCopyIn( Config, p_in->Config, p_in->ConfigLength );
+            if( err_ != NvSuccess )
+            {
+                err_ = NvError_BadParameter;
+                goto clean;
+            }
+        }
+    }
+
+    p_out->ret_ = NvRmAnalogInterfaceControl( p_in->hDevice, p_in->Interface, p_in->Enable, Config, p_in->ConfigLength );
+
+    /* copy the (possibly updated) config back to the caller */
+    if(p_in->Config && Config)
+    {
+        err_ = NvOsCopyOut( p_in->Config, Config, p_in->ConfigLength );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( Config );
+    return err_;
+}
+
+/*
+ * Package-level dispatcher for the nvrm_analog IDL package: routes the
+ * function id chosen by NvRm_Dispatch to the matching per-function
+ * unmarshalling stub above.  Unknown ids yield NvError_BadParameter.
+ */
+NvError nvrm_analog_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_analog_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+
+    switch( function ) {
+    case 3:
+        err_ = NvRmUsbDetectChargerState_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 2:
+        err_ = NvRmUsbIsConnected_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 1:
+        err_ = NvRmAnalogGetTvDacConfiguration_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 0:
+        err_ = NvRmAnalogInterfaceControl_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    default:
+        err_ = NvError_BadParameter;
+        break;
+    }
+
+    return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_diag_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_diag_dispatch.c
new file mode 100644
index 000000000000..b521ca38d815
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_diag_dispatch.c
@@ -0,0 +1,1078 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_diag.h"
+
+/* Byte offset of member 'e' within struct type 's'.  This is the classic
+ * null-pointer-cast offsetof idiom; formally undefined behavior in ISO C
+ * (stddef.h offsetof() is the portable spelling), but presumably
+ * well-defined on the compilers this generated code targets — confirm
+ * before reuse elsewhere. */
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * Generated marshaling structures for the nvrm_diag package.  Each
+ * dispatched RM entry point gets four structs: _in (arguments copied in
+ * from the caller, prefixed by the package/function selector words),
+ * _inout (arguments read and written), _out (the return value 'ret_'
+ * plus any pure output arguments), and _params (the full wire layout
+ * used only for OFFSET() computations).  Pointer-typed fields hold
+ * caller-space addresses and must only be dereferenced through
+ * NvOsCopyIn/NvOsCopyOut in the dispatch stubs below.
+ */
+typedef struct NvRmDiagGetTemperature_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDeviceHandle hRmDeviceHandle;
+    NvRmTmonZoneId ZoneId;
+} NV_ALIGN(4) NvRmDiagGetTemperature_in;
+
+typedef struct NvRmDiagGetTemperature_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagGetTemperature_inout;
+
+typedef struct NvRmDiagGetTemperature_out_t
+{
+    NvError ret_;
+    NvS32 pTemperatureC;
+} NV_ALIGN(4) NvRmDiagGetTemperature_out;
+
+typedef struct NvRmDiagGetTemperature_params_t
+{
+    NvRmDiagGetTemperature_in in;
+    NvRmDiagGetTemperature_inout inout;
+    NvRmDiagGetTemperature_out out;
+} NvRmDiagGetTemperature_params;
+
+typedef struct NvRmDiagIsLockSupported_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+} NV_ALIGN(4) NvRmDiagIsLockSupported_in;
+
+typedef struct NvRmDiagIsLockSupported_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagIsLockSupported_inout;
+
+typedef struct NvRmDiagIsLockSupported_out_t
+{
+    NvBool ret_;
+} NV_ALIGN(4) NvRmDiagIsLockSupported_out;
+
+typedef struct NvRmDiagIsLockSupported_params_t
+{
+    NvRmDiagIsLockSupported_in in;
+    NvRmDiagIsLockSupported_inout inout;
+    NvRmDiagIsLockSupported_out out;
+} NvRmDiagIsLockSupported_params;
+
+typedef struct NvRmDiagConfigurePowerRail_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagPowerRailHandle hRail;
+    NvU32 VoltageMV;
+} NV_ALIGN(4) NvRmDiagConfigurePowerRail_in;
+
+typedef struct NvRmDiagConfigurePowerRail_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagConfigurePowerRail_inout;
+
+typedef struct NvRmDiagConfigurePowerRail_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagConfigurePowerRail_out;
+
+typedef struct NvRmDiagConfigurePowerRail_params_t
+{
+    NvRmDiagConfigurePowerRail_in in;
+    NvRmDiagConfigurePowerRail_inout inout;
+    NvRmDiagConfigurePowerRail_out out;
+} NvRmDiagConfigurePowerRail_params;
+
+typedef struct NvRmDiagModuleListPowerRails_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID id;
+    NvRmDiagPowerRailHandle * phRailList;
+} NV_ALIGN(4) NvRmDiagModuleListPowerRails_in;
+
+typedef struct NvRmDiagModuleListPowerRails_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagModuleListPowerRails_inout;
+
+typedef struct NvRmDiagModuleListPowerRails_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagModuleListPowerRails_out;
+
+typedef struct NvRmDiagModuleListPowerRails_params_t
+{
+    NvRmDiagModuleListPowerRails_in in;
+    NvRmDiagModuleListPowerRails_inout inout;
+    NvRmDiagModuleListPowerRails_out out;
+} NvRmDiagModuleListPowerRails_params;
+
+typedef struct NvRmDiagPowerRailGetName_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagPowerRailHandle hRail;
+} NV_ALIGN(4) NvRmDiagPowerRailGetName_in;
+
+typedef struct NvRmDiagPowerRailGetName_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagPowerRailGetName_inout;
+
+typedef struct NvRmDiagPowerRailGetName_out_t
+{
+    NvU64 ret_;
+} NV_ALIGN(4) NvRmDiagPowerRailGetName_out;
+
+typedef struct NvRmDiagPowerRailGetName_params_t
+{
+    NvRmDiagPowerRailGetName_in in;
+    NvRmDiagPowerRailGetName_inout inout;
+    NvRmDiagPowerRailGetName_out out;
+} NvRmDiagPowerRailGetName_params;
+
+typedef struct NvRmDiagListPowerRails_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagPowerRailHandle * phRailList;
+} NV_ALIGN(4) NvRmDiagListPowerRails_in;
+
+typedef struct NvRmDiagListPowerRails_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagListPowerRails_inout;
+
+typedef struct NvRmDiagListPowerRails_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagListPowerRails_out;
+
+typedef struct NvRmDiagListPowerRails_params_t
+{
+    NvRmDiagListPowerRails_in in;
+    NvRmDiagListPowerRails_inout inout;
+    NvRmDiagListPowerRails_out out;
+} NvRmDiagListPowerRails_params;
+
+typedef struct NvRmDiagModuleReset_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID id;
+    NvBool KeepAsserted;
+} NV_ALIGN(4) NvRmDiagModuleReset_in;
+
+typedef struct NvRmDiagModuleReset_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagModuleReset_inout;
+
+typedef struct NvRmDiagModuleReset_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagModuleReset_out;
+
+typedef struct NvRmDiagModuleReset_params_t
+{
+    NvRmDiagModuleReset_in in;
+    NvRmDiagModuleReset_inout inout;
+    NvRmDiagModuleReset_out out;
+} NvRmDiagModuleReset_params;
+
+typedef struct NvRmDiagClockScalerConfigure_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hScaler;
+    NvRmDiagClockSourceHandle hInput;
+    NvU32 M;
+    NvU32 N;
+} NV_ALIGN(4) NvRmDiagClockScalerConfigure_in;
+
+typedef struct NvRmDiagClockScalerConfigure_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagClockScalerConfigure_inout;
+
+typedef struct NvRmDiagClockScalerConfigure_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagClockScalerConfigure_out;
+
+typedef struct NvRmDiagClockScalerConfigure_params_t
+{
+    NvRmDiagClockScalerConfigure_in in;
+    NvRmDiagClockScalerConfigure_inout inout;
+    NvRmDiagClockScalerConfigure_out out;
+} NvRmDiagClockScalerConfigure_params;
+
+typedef struct NvRmDiagPllConfigure_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hPll;
+    NvU32 M;
+    NvU32 N;
+    NvU32 P;
+} NV_ALIGN(4) NvRmDiagPllConfigure_in;
+
+typedef struct NvRmDiagPllConfigure_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagPllConfigure_inout;
+
+typedef struct NvRmDiagPllConfigure_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagPllConfigure_out;
+
+typedef struct NvRmDiagPllConfigure_params_t
+{
+    NvRmDiagPllConfigure_in in;
+    NvRmDiagPllConfigure_inout inout;
+    NvRmDiagPllConfigure_out out;
+} NvRmDiagPllConfigure_params;
+
+typedef struct NvRmDiagOscillatorGetFreq_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hOscillator;
+} NV_ALIGN(4) NvRmDiagOscillatorGetFreq_in;
+
+typedef struct NvRmDiagOscillatorGetFreq_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagOscillatorGetFreq_inout;
+
+typedef struct NvRmDiagOscillatorGetFreq_out_t
+{
+    NvU32 ret_;
+} NV_ALIGN(4) NvRmDiagOscillatorGetFreq_out;
+
+typedef struct NvRmDiagOscillatorGetFreq_params_t
+{
+    NvRmDiagOscillatorGetFreq_in in;
+    NvRmDiagOscillatorGetFreq_inout inout;
+    NvRmDiagOscillatorGetFreq_out out;
+} NvRmDiagOscillatorGetFreq_params;
+
+typedef struct NvRmDiagClockSourceListSources_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hSource;
+    NvRmDiagClockSourceHandle * phSourceList;
+} NV_ALIGN(4) NvRmDiagClockSourceListSources_in;
+
+typedef struct NvRmDiagClockSourceListSources_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagClockSourceListSources_inout;
+
+typedef struct NvRmDiagClockSourceListSources_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagClockSourceListSources_out;
+
+typedef struct NvRmDiagClockSourceListSources_params_t
+{
+    NvRmDiagClockSourceListSources_in in;
+    NvRmDiagClockSourceListSources_inout inout;
+    NvRmDiagClockSourceListSources_out out;
+} NvRmDiagClockSourceListSources_params;
+
+typedef struct NvRmDiagClockSourceGetScaler_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hSource;
+} NV_ALIGN(4) NvRmDiagClockSourceGetScaler_in;
+
+typedef struct NvRmDiagClockSourceGetScaler_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetScaler_inout;
+
+typedef struct NvRmDiagClockSourceGetScaler_out_t
+{
+    NvRmDiagClockScalerType ret_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetScaler_out;
+
+typedef struct NvRmDiagClockSourceGetScaler_params_t
+{
+    NvRmDiagClockSourceGetScaler_in in;
+    NvRmDiagClockSourceGetScaler_inout inout;
+    NvRmDiagClockSourceGetScaler_out out;
+} NvRmDiagClockSourceGetScaler_params;
+
+typedef struct NvRmDiagClockSourceGetType_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hSource;
+} NV_ALIGN(4) NvRmDiagClockSourceGetType_in;
+
+typedef struct NvRmDiagClockSourceGetType_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetType_inout;
+
+typedef struct NvRmDiagClockSourceGetType_out_t
+{
+    NvRmDiagClockSourceType ret_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetType_out;
+
+typedef struct NvRmDiagClockSourceGetType_params_t
+{
+    NvRmDiagClockSourceGetType_in in;
+    NvRmDiagClockSourceGetType_inout inout;
+    NvRmDiagClockSourceGetType_out out;
+} NvRmDiagClockSourceGetType_params;
+
+typedef struct NvRmDiagClockSourceGetName_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle hSource;
+} NV_ALIGN(4) NvRmDiagClockSourceGetName_in;
+
+typedef struct NvRmDiagClockSourceGetName_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetName_inout;
+
+typedef struct NvRmDiagClockSourceGetName_out_t
+{
+    NvU64 ret_;
+} NV_ALIGN(4) NvRmDiagClockSourceGetName_out;
+
+typedef struct NvRmDiagClockSourceGetName_params_t
+{
+    NvRmDiagClockSourceGetName_in in;
+    NvRmDiagClockSourceGetName_inout inout;
+    NvRmDiagClockSourceGetName_out out;
+} NvRmDiagClockSourceGetName_params;
+
+typedef struct NvRmDiagModuleClockConfigure_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID id;
+    NvRmDiagClockSourceHandle hSource;
+    NvU32 divider;
+    NvBool Source1st;
+} NV_ALIGN(4) NvRmDiagModuleClockConfigure_in;
+
+typedef struct NvRmDiagModuleClockConfigure_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagModuleClockConfigure_inout;
+
+typedef struct NvRmDiagModuleClockConfigure_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagModuleClockConfigure_out;
+
+typedef struct NvRmDiagModuleClockConfigure_params_t
+{
+    NvRmDiagModuleClockConfigure_in in;
+    NvRmDiagModuleClockConfigure_inout inout;
+    NvRmDiagModuleClockConfigure_out out;
+} NvRmDiagModuleClockConfigure_params;
+
+typedef struct NvRmDiagModuleClockEnable_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID id;
+    NvBool enable;
+} NV_ALIGN(4) NvRmDiagModuleClockEnable_in;
+
+typedef struct NvRmDiagModuleClockEnable_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagModuleClockEnable_inout;
+
+typedef struct NvRmDiagModuleClockEnable_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagModuleClockEnable_out;
+
+typedef struct NvRmDiagModuleClockEnable_params_t
+{
+    NvRmDiagModuleClockEnable_in in;
+    NvRmDiagModuleClockEnable_inout inout;
+    NvRmDiagModuleClockEnable_out out;
+} NvRmDiagModuleClockEnable_params;
+
+typedef struct NvRmDiagModuleListClockSources_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID id;
+    NvRmDiagClockSourceHandle * phSourceList;
+} NV_ALIGN(4) NvRmDiagModuleListClockSources_in;
+
+typedef struct NvRmDiagModuleListClockSources_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagModuleListClockSources_inout;
+
+typedef struct NvRmDiagModuleListClockSources_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagModuleListClockSources_out;
+
+typedef struct NvRmDiagModuleListClockSources_params_t
+{
+    NvRmDiagModuleListClockSources_in in;
+    NvRmDiagModuleListClockSources_inout inout;
+    NvRmDiagModuleListClockSources_out out;
+} NvRmDiagModuleListClockSources_params;
+
+typedef struct NvRmDiagListClockSources_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagClockSourceHandle * phSourceList;
+} NV_ALIGN(4) NvRmDiagListClockSources_in;
+
+typedef struct NvRmDiagListClockSources_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagListClockSources_inout;
+
+typedef struct NvRmDiagListClockSources_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagListClockSources_out;
+
+typedef struct NvRmDiagListClockSources_params_t
+{
+    NvRmDiagListClockSources_in in;
+    NvRmDiagListClockSources_inout inout;
+    NvRmDiagListClockSources_out out;
+} NvRmDiagListClockSources_params;
+
+typedef struct NvRmDiagListModules_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDiagModuleID * pIdList;
+} NV_ALIGN(4) NvRmDiagListModules_in;
+
+typedef struct NvRmDiagListModules_inout_t
+{
+    NvU32 pListSize;
+} NV_ALIGN(4) NvRmDiagListModules_inout;
+
+typedef struct NvRmDiagListModules_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagListModules_out;
+
+typedef struct NvRmDiagListModules_params_t
+{
+    NvRmDiagListModules_in in;
+    NvRmDiagListModules_inout inout;
+    NvRmDiagListModules_out out;
+} NvRmDiagListModules_params;
+
+typedef struct NvRmDiagEnable_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDeviceHandle hDevice;
+} NV_ALIGN(4) NvRmDiagEnable_in;
+
+typedef struct NvRmDiagEnable_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmDiagEnable_inout;
+
+typedef struct NvRmDiagEnable_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmDiagEnable_out;
+
+typedef struct NvRmDiagEnable_params_t
+{
+    NvRmDiagEnable_in in;
+    NvRmDiagEnable_inout inout;
+    NvRmDiagEnable_out out;
+} NvRmDiagEnable_params;
+
+/* Dispatch stub for NvRmDiagGetTemperature: unmarshals the device handle
+ * and zone id from InBuffer and writes both the NvError result and the
+ * NvS32 temperature output directly into the caller's out struct.
+ * InSize/OutSize/Ctx are unused here (fixed-size marshaling). */
+static NvError NvRmDiagGetTemperature_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagGetTemperature_in *p_in;
+    NvRmDiagGetTemperature_out *p_out;
+
+    p_in = (NvRmDiagGetTemperature_in *)InBuffer;
+    p_out = (NvRmDiagGetTemperature_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagGetTemperature_params, out) - OFFSET(NvRmDiagGetTemperature_params, inout));
+
+
+    p_out->ret_ = NvRmDiagGetTemperature( p_in->hRmDeviceHandle, p_in->ZoneId, &p_out->pTemperatureC );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagIsLockSupported: no input arguments; stores
+ * the NvBool result in the caller's out struct. */
+static NvError NvRmDiagIsLockSupported_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagIsLockSupported_out *p_out;
+    p_out = (NvRmDiagIsLockSupported_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagIsLockSupported_params, out) - OFFSET(NvRmDiagIsLockSupported_params, inout));
+
+
+    p_out->ret_ = NvRmDiagIsLockSupported( );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagConfigurePowerRail: forwards the rail handle
+ * and requested millivolt level; returns the call's NvError in the out
+ * struct. */
+static NvError NvRmDiagConfigurePowerRail_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagConfigurePowerRail_in *p_in;
+    NvRmDiagConfigurePowerRail_out *p_out;
+
+    p_in = (NvRmDiagConfigurePowerRail_in *)InBuffer;
+    p_out = (NvRmDiagConfigurePowerRail_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagConfigurePowerRail_params, out) - OFFSET(NvRmDiagConfigurePowerRail_params, inout));
+
+
+    p_out->ret_ = NvRmDiagConfigurePowerRail( p_in->hRail, p_in->VoltageMV );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagModuleListPowerRails: bounces the caller's
+ * rail-handle list through a kernel buffer sized by the caller-supplied
+ * element count.
+ * Fixes vs. the generated code: (1) the element count is validated so
+ * the NvU32 size multiplication cannot overflow into an undersized
+ * allocation; (2) the copy-out is clamped to the allocated element
+ * count, since the RM call may report a total rail count larger than
+ * the caller's buffer.  The unclamped count is still returned so the
+ * caller can re-query with a larger list. */
+static NvError NvRmDiagModuleListPowerRails_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagModuleListPowerRails_in *p_in;
+    NvRmDiagModuleListPowerRails_inout *p_inout;
+    NvRmDiagModuleListPowerRails_out *p_out;
+    NvRmDiagModuleListPowerRails_inout inout;
+    NvRmDiagPowerRailHandle *phRailList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagModuleListPowerRails_in *)InBuffer;
+    p_inout = (NvRmDiagModuleListPowerRails_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagModuleListPowerRails_params, inout));
+    p_out = (NvRmDiagModuleListPowerRails_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagModuleListPowerRails_params, out) - OFFSET(NvRmDiagModuleListPowerRails_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->phRailList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagPowerRailHandle ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        phRailList = (NvRmDiagPowerRailHandle *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagPowerRailHandle ) );
+        if( !phRailList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagModuleListPowerRails( p_in->id, &inout.pListSize, phRailList );
+
+    p_inout = (NvRmDiagModuleListPowerRails_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->phRailList && phRailList)
+    {
+        /* Copy back no more handles than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->phRailList, phRailList, CopyCount * sizeof( NvRmDiagPowerRailHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( phRailList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagPowerRailGetName: returns the rail's packed
+ * 64-bit name in the out struct's ret_ field. */
+static NvError NvRmDiagPowerRailGetName_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagPowerRailGetName_in *p_in;
+    NvRmDiagPowerRailGetName_out *p_out;
+
+    p_in = (NvRmDiagPowerRailGetName_in *)InBuffer;
+    p_out = (NvRmDiagPowerRailGetName_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagPowerRailGetName_params, out) - OFFSET(NvRmDiagPowerRailGetName_params, inout));
+
+
+    p_out->ret_ = NvRmDiagPowerRailGetName( p_in->hRail );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagListPowerRails: bounces the caller's
+ * rail-handle list through a kernel buffer.
+ * Fixes vs. the generated code: the allocation size multiplication is
+ * guarded against NvU32 overflow, and the copy-out is clamped to the
+ * allocated element count in case the RM call reports a larger total
+ * rail count than the caller's buffer holds.  The unclamped count is
+ * still returned for re-query. */
+static NvError NvRmDiagListPowerRails_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagListPowerRails_in *p_in;
+    NvRmDiagListPowerRails_inout *p_inout;
+    NvRmDiagListPowerRails_out *p_out;
+    NvRmDiagListPowerRails_inout inout;
+    NvRmDiagPowerRailHandle *phRailList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagListPowerRails_in *)InBuffer;
+    p_inout = (NvRmDiagListPowerRails_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagListPowerRails_params, inout));
+    p_out = (NvRmDiagListPowerRails_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagListPowerRails_params, out) - OFFSET(NvRmDiagListPowerRails_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->phRailList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagPowerRailHandle ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        phRailList = (NvRmDiagPowerRailHandle *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagPowerRailHandle ) );
+        if( !phRailList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagListPowerRails( &inout.pListSize, phRailList );
+
+    p_inout = (NvRmDiagListPowerRails_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->phRailList && phRailList)
+    {
+        /* Copy back no more handles than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->phRailList, phRailList, CopyCount * sizeof( NvRmDiagPowerRailHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( phRailList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagModuleReset: forwards the module id and the
+ * keep-asserted flag; returns the call's NvError in the out struct. */
+static NvError NvRmDiagModuleReset_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagModuleReset_in *p_in;
+    NvRmDiagModuleReset_out *p_out;
+
+    p_in = (NvRmDiagModuleReset_in *)InBuffer;
+    p_out = (NvRmDiagModuleReset_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagModuleReset_params, out) - OFFSET(NvRmDiagModuleReset_params, inout));
+
+
+    p_out->ret_ = NvRmDiagModuleReset( p_in->id, p_in->KeepAsserted );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagClockScalerConfigure: forwards the scaler
+ * and input source handles plus the M/N ratio settings. */
+static NvError NvRmDiagClockScalerConfigure_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagClockScalerConfigure_in *p_in;
+    NvRmDiagClockScalerConfigure_out *p_out;
+
+    p_in = (NvRmDiagClockScalerConfigure_in *)InBuffer;
+    p_out = (NvRmDiagClockScalerConfigure_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagClockScalerConfigure_params, out) - OFFSET(NvRmDiagClockScalerConfigure_params, inout));
+
+
+    p_out->ret_ = NvRmDiagClockScalerConfigure( p_in->hScaler, p_in->hInput, p_in->M, p_in->N );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagPllConfigure: forwards the PLL handle and
+ * the M/N/P divider settings. */
+static NvError NvRmDiagPllConfigure_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagPllConfigure_in *p_in;
+    NvRmDiagPllConfigure_out *p_out;
+
+    p_in = (NvRmDiagPllConfigure_in *)InBuffer;
+    p_out = (NvRmDiagPllConfigure_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagPllConfigure_params, out) - OFFSET(NvRmDiagPllConfigure_params, inout));
+
+
+    p_out->ret_ = NvRmDiagPllConfigure( p_in->hPll, p_in->M, p_in->N, p_in->P );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagOscillatorGetFreq: returns the oscillator
+ * frequency (NvU32) in the out struct's ret_ field. */
+static NvError NvRmDiagOscillatorGetFreq_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagOscillatorGetFreq_in *p_in;
+    NvRmDiagOscillatorGetFreq_out *p_out;
+
+    p_in = (NvRmDiagOscillatorGetFreq_in *)InBuffer;
+    p_out = (NvRmDiagOscillatorGetFreq_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagOscillatorGetFreq_params, out) - OFFSET(NvRmDiagOscillatorGetFreq_params, inout));
+
+
+    p_out->ret_ = NvRmDiagOscillatorGetFreq( p_in->hOscillator );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagClockSourceListSources: bounces the caller's
+ * source-handle list through a kernel buffer.
+ * Fixes vs. the generated code: the allocation size multiplication is
+ * guarded against NvU32 overflow, and the copy-out is clamped to the
+ * allocated element count in case the RM call reports a larger total
+ * than the caller's buffer holds.  The unclamped count is still
+ * returned for re-query. */
+static NvError NvRmDiagClockSourceListSources_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagClockSourceListSources_in *p_in;
+    NvRmDiagClockSourceListSources_inout *p_inout;
+    NvRmDiagClockSourceListSources_out *p_out;
+    NvRmDiagClockSourceListSources_inout inout;
+    NvRmDiagClockSourceHandle *phSourceList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagClockSourceListSources_in *)InBuffer;
+    p_inout = (NvRmDiagClockSourceListSources_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagClockSourceListSources_params, inout));
+    p_out = (NvRmDiagClockSourceListSources_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagClockSourceListSources_params, out) - OFFSET(NvRmDiagClockSourceListSources_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->phSourceList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagClockSourceHandle ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        phSourceList = (NvRmDiagClockSourceHandle *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagClockSourceHandle ) );
+        if( !phSourceList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagClockSourceListSources( p_in->hSource, &inout.pListSize, phSourceList );
+
+    p_inout = (NvRmDiagClockSourceListSources_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->phSourceList && phSourceList)
+    {
+        /* Copy back no more handles than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->phSourceList, phSourceList, CopyCount * sizeof( NvRmDiagClockSourceHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( phSourceList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagClockSourceGetScaler: returns the source's
+ * scaler type enum in the out struct's ret_ field. */
+static NvError NvRmDiagClockSourceGetScaler_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagClockSourceGetScaler_in *p_in;
+    NvRmDiagClockSourceGetScaler_out *p_out;
+
+    p_in = (NvRmDiagClockSourceGetScaler_in *)InBuffer;
+    p_out = (NvRmDiagClockSourceGetScaler_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagClockSourceGetScaler_params, out) - OFFSET(NvRmDiagClockSourceGetScaler_params, inout));
+
+
+    p_out->ret_ = NvRmDiagClockSourceGetScaler( p_in->hSource );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagClockSourceGetType: returns the source's
+ * type enum in the out struct's ret_ field. */
+static NvError NvRmDiagClockSourceGetType_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagClockSourceGetType_in *p_in;
+    NvRmDiagClockSourceGetType_out *p_out;
+
+    p_in = (NvRmDiagClockSourceGetType_in *)InBuffer;
+    p_out = (NvRmDiagClockSourceGetType_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagClockSourceGetType_params, out) - OFFSET(NvRmDiagClockSourceGetType_params, inout));
+
+
+    p_out->ret_ = NvRmDiagClockSourceGetType( p_in->hSource );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagClockSourceGetName: returns the source's
+ * packed 64-bit name in the out struct's ret_ field. */
+static NvError NvRmDiagClockSourceGetName_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagClockSourceGetName_in *p_in;
+    NvRmDiagClockSourceGetName_out *p_out;
+
+    p_in = (NvRmDiagClockSourceGetName_in *)InBuffer;
+    p_out = (NvRmDiagClockSourceGetName_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagClockSourceGetName_params, out) - OFFSET(NvRmDiagClockSourceGetName_params, inout));
+
+
+    p_out->ret_ = NvRmDiagClockSourceGetName( p_in->hSource );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagModuleClockConfigure: forwards the module
+ * id, source handle, divider and source-first ordering flag. */
+static NvError NvRmDiagModuleClockConfigure_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagModuleClockConfigure_in *p_in;
+    NvRmDiagModuleClockConfigure_out *p_out;
+
+    p_in = (NvRmDiagModuleClockConfigure_in *)InBuffer;
+    p_out = (NvRmDiagModuleClockConfigure_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagModuleClockConfigure_params, out) - OFFSET(NvRmDiagModuleClockConfigure_params, inout));
+
+
+    p_out->ret_ = NvRmDiagModuleClockConfigure( p_in->id, p_in->hSource, p_in->divider, p_in->Source1st );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagModuleClockEnable: forwards the module id
+ * and enable flag. */
+static NvError NvRmDiagModuleClockEnable_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagModuleClockEnable_in *p_in;
+    NvRmDiagModuleClockEnable_out *p_out;
+
+    p_in = (NvRmDiagModuleClockEnable_in *)InBuffer;
+    p_out = (NvRmDiagModuleClockEnable_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagModuleClockEnable_params, out) - OFFSET(NvRmDiagModuleClockEnable_params, inout));
+
+
+    p_out->ret_ = NvRmDiagModuleClockEnable( p_in->id, p_in->enable );
+
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagModuleListClockSources: bounces the caller's
+ * source-handle list through a kernel buffer.
+ * Fixes vs. the generated code: the allocation size multiplication is
+ * guarded against NvU32 overflow, and the copy-out is clamped to the
+ * allocated element count in case the RM call reports a larger total
+ * than the caller's buffer holds.  The unclamped count is still
+ * returned for re-query. */
+static NvError NvRmDiagModuleListClockSources_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagModuleListClockSources_in *p_in;
+    NvRmDiagModuleListClockSources_inout *p_inout;
+    NvRmDiagModuleListClockSources_out *p_out;
+    NvRmDiagModuleListClockSources_inout inout;
+    NvRmDiagClockSourceHandle *phSourceList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagModuleListClockSources_in *)InBuffer;
+    p_inout = (NvRmDiagModuleListClockSources_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagModuleListClockSources_params, inout));
+    p_out = (NvRmDiagModuleListClockSources_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagModuleListClockSources_params, out) - OFFSET(NvRmDiagModuleListClockSources_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->phSourceList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagClockSourceHandle ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        phSourceList = (NvRmDiagClockSourceHandle *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagClockSourceHandle ) );
+        if( !phSourceList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagModuleListClockSources( p_in->id, &inout.pListSize, phSourceList );
+
+    p_inout = (NvRmDiagModuleListClockSources_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->phSourceList && phSourceList)
+    {
+        /* Copy back no more handles than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->phSourceList, phSourceList, CopyCount * sizeof( NvRmDiagClockSourceHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( phSourceList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagListClockSources: bounces the caller's
+ * source-handle list through a kernel buffer.
+ * Fixes vs. the generated code: the allocation size multiplication is
+ * guarded against NvU32 overflow, and the copy-out is clamped to the
+ * allocated element count in case the RM call reports a larger total
+ * than the caller's buffer holds.  The unclamped count is still
+ * returned for re-query. */
+static NvError NvRmDiagListClockSources_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagListClockSources_in *p_in;
+    NvRmDiagListClockSources_inout *p_inout;
+    NvRmDiagListClockSources_out *p_out;
+    NvRmDiagListClockSources_inout inout;
+    NvRmDiagClockSourceHandle *phSourceList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagListClockSources_in *)InBuffer;
+    p_inout = (NvRmDiagListClockSources_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagListClockSources_params, inout));
+    p_out = (NvRmDiagListClockSources_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagListClockSources_params, out) - OFFSET(NvRmDiagListClockSources_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->phSourceList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagClockSourceHandle ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        phSourceList = (NvRmDiagClockSourceHandle *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagClockSourceHandle ) );
+        if( !phSourceList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagListClockSources( &inout.pListSize, phSourceList );
+
+    p_inout = (NvRmDiagListClockSources_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->phSourceList && phSourceList)
+    {
+        /* Copy back no more handles than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->phSourceList, phSourceList, CopyCount * sizeof( NvRmDiagClockSourceHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( phSourceList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagListModules: bounces the caller's module-id
+ * list through a kernel buffer.
+ * Fixes vs. the generated code: the allocation size multiplication is
+ * guarded against NvU32 overflow, and the copy-out is clamped to the
+ * allocated element count in case the RM call reports a larger total
+ * module count than the caller's buffer holds.  The unclamped count is
+ * still returned for re-query. */
+static NvError NvRmDiagListModules_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagListModules_in *p_in;
+    NvRmDiagListModules_inout *p_inout;
+    NvRmDiagListModules_out *p_out;
+    NvRmDiagListModules_inout inout;
+    NvRmDiagModuleID *pIdList = NULL;
+    NvU32 AllocCount = 0;
+
+    p_in = (NvRmDiagListModules_in *)InBuffer;
+    p_inout = (NvRmDiagListModules_inout *)((NvU8 *)InBuffer + OFFSET(NvRmDiagListModules_params, inout));
+    p_out = (NvRmDiagListModules_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagListModules_params, out) - OFFSET(NvRmDiagListModules_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    if( p_inout->pListSize && p_in->pIdList )
+    {
+        /* Reject counts that would overflow the allocation size. */
+        if( p_inout->pListSize > ((NvU32)-1) / sizeof( NvRmDiagModuleID ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        pIdList = (NvRmDiagModuleID *)NvOsAlloc( p_inout->pListSize * sizeof( NvRmDiagModuleID ) );
+        if( !pIdList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        AllocCount = p_inout->pListSize;
+    }
+
+    p_out->ret_ = NvRmDiagListModules( &inout.pListSize, pIdList );
+
+    p_inout = (NvRmDiagListModules_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    if(p_in->pIdList && pIdList)
+    {
+        /* Copy back no more ids than were actually allocated. */
+        NvU32 CopyCount = (inout.pListSize < AllocCount) ? inout.pListSize : AllocCount;
+        err_ = NvOsCopyOut( p_in->pIdList, pIdList, CopyCount * sizeof( NvRmDiagModuleID ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pIdList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDiagEnable: forwards the RM device handle and
+ * stores the call's NvError result in the out struct. */
+static NvError NvRmDiagEnable_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDiagEnable_in *p_in;
+    NvRmDiagEnable_out *p_out;
+
+    p_in = (NvRmDiagEnable_in *)InBuffer;
+    p_out = (NvRmDiagEnable_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDiagEnable_params, out) - OFFSET(NvRmDiagEnable_params, inout));
+
+
+    p_out->ret_ = NvRmDiagEnable( p_in->hDevice );
+
+    return err_;
+}
+
+/* Top-level dispatcher for the nvrm_diag package: routes a marshaled
+ * request to the per-function stub selected by 'function'.  Unknown
+ * function codes are rejected with NvError_BadParameter. */
+NvError nvrm_diag_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_diag_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+
+    switch( function ) {
+    case 19:
+        err_ = NvRmDiagGetTemperature_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 18:
+        err_ = NvRmDiagIsLockSupported_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 17:
+        err_ = NvRmDiagConfigurePowerRail_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 16:
+        err_ = NvRmDiagModuleListPowerRails_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 15:
+        err_ = NvRmDiagPowerRailGetName_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 14:
+        err_ = NvRmDiagListPowerRails_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 13:
+        err_ = NvRmDiagModuleReset_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 12:
+        err_ = NvRmDiagClockScalerConfigure_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 11:
+        err_ = NvRmDiagPllConfigure_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 10:
+        err_ = NvRmDiagOscillatorGetFreq_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 9:
+        err_ = NvRmDiagClockSourceListSources_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 8:
+        err_ = NvRmDiagClockSourceGetScaler_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 7:
+        err_ = NvRmDiagClockSourceGetType_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 6:
+        err_ = NvRmDiagClockSourceGetName_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 5:
+        err_ = NvRmDiagModuleClockConfigure_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 4:
+        err_ = NvRmDiagModuleClockEnable_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 3:
+        err_ = NvRmDiagModuleListClockSources_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 2:
+        err_ = NvRmDiagListClockSources_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 1:
+        err_ = NvRmDiagListModules_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 0:
+        err_ = NvRmDiagEnable_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    default:
+        err_ = NvError_BadParameter;
+        break;
+    }
+
+    return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_dma_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_dma_dispatch.c
new file mode 100644
index 000000000000..ff246393b5ef
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_dma_dispatch.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_dma.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+typedef struct NvRmDmaIsDmaTransferCompletes_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDmaHandle hDma;
+ NvBool IsFirstHalfBuffer;
+} NV_ALIGN(4) NvRmDmaIsDmaTransferCompletes_in;
+
+typedef struct NvRmDmaIsDmaTransferCompletes_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaIsDmaTransferCompletes_inout;
+
+typedef struct NvRmDmaIsDmaTransferCompletes_out_t
+{
+ NvBool ret_;
+} NV_ALIGN(4) NvRmDmaIsDmaTransferCompletes_out;
+
+typedef struct NvRmDmaIsDmaTransferCompletes_params_t
+{
+ NvRmDmaIsDmaTransferCompletes_in in;
+ NvRmDmaIsDmaTransferCompletes_inout inout;
+ NvRmDmaIsDmaTransferCompletes_out out;
+} NvRmDmaIsDmaTransferCompletes_params;
+
+typedef struct NvRmDmaGetTransferredCount_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDmaHandle hDma;
+ NvBool IsTransferStop;
+} NV_ALIGN(4) NvRmDmaGetTransferredCount_in;
+
+typedef struct NvRmDmaGetTransferredCount_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaGetTransferredCount_inout;
+
+typedef struct NvRmDmaGetTransferredCount_out_t
+{
+ NvError ret_;
+ NvU32 pTransferCount;
+} NV_ALIGN(4) NvRmDmaGetTransferredCount_out;
+
+typedef struct NvRmDmaGetTransferredCount_params_t
+{
+ NvRmDmaGetTransferredCount_in in;
+ NvRmDmaGetTransferredCount_inout inout;
+ NvRmDmaGetTransferredCount_out out;
+} NvRmDmaGetTransferredCount_params;
+
+typedef struct NvRmDmaAbort_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDmaHandle hDma;
+} NV_ALIGN(4) NvRmDmaAbort_in;
+
+typedef struct NvRmDmaAbort_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaAbort_inout;
+
+typedef struct NvRmDmaAbort_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaAbort_out;
+
+typedef struct NvRmDmaAbort_params_t
+{
+ NvRmDmaAbort_in in;
+ NvRmDmaAbort_inout inout;
+ NvRmDmaAbort_out out;
+} NvRmDmaAbort_params;
+
+typedef struct NvRmDmaStartDmaTransfer_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDmaHandle hDma;
+ NvRmDmaClientBuffer pClientBuffer;
+ NvRmDmaDirection DmaDirection;
+ NvU32 WaitTimeoutInMilliSecond;
+ NvOsSemaphoreHandle AsynchSemaphoreId;
+} NV_ALIGN(4) NvRmDmaStartDmaTransfer_in;
+
+typedef struct NvRmDmaStartDmaTransfer_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaStartDmaTransfer_inout;
+
+typedef struct NvRmDmaStartDmaTransfer_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDmaStartDmaTransfer_out;
+
+typedef struct NvRmDmaStartDmaTransfer_params_t
+{
+ NvRmDmaStartDmaTransfer_in in;
+ NvRmDmaStartDmaTransfer_inout inout;
+ NvRmDmaStartDmaTransfer_out out;
+} NvRmDmaStartDmaTransfer_params;
+
+typedef struct NvRmDmaFree_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDmaHandle hDma;
+} NV_ALIGN(4) NvRmDmaFree_in;
+
+typedef struct NvRmDmaFree_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaFree_inout;
+
+typedef struct NvRmDmaFree_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaFree_out;
+
+typedef struct NvRmDmaFree_params_t
+{
+ NvRmDmaFree_in in;
+ NvRmDmaFree_inout inout;
+ NvRmDmaFree_out out;
+} NvRmDmaFree_params;
+
+typedef struct NvRmDmaAllocate_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvBool Enable32bitSwap;
+ NvRmDmaPriority Priority;
+ NvRmDmaModuleID DmaRequestorModuleId;
+ NvU32 DmaRequestorInstanceId;
+} NV_ALIGN(4) NvRmDmaAllocate_in;
+
+typedef struct NvRmDmaAllocate_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaAllocate_inout;
+
+typedef struct NvRmDmaAllocate_out_t
+{
+ NvError ret_;
+ NvRmDmaHandle phDma;
+} NV_ALIGN(4) NvRmDmaAllocate_out;
+
+typedef struct NvRmDmaAllocate_params_t
+{
+ NvRmDmaAllocate_in in;
+ NvRmDmaAllocate_inout inout;
+ NvRmDmaAllocate_out out;
+} NvRmDmaAllocate_params;
+
+typedef struct NvRmDmaGetCapabilities_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmDmaCapabilities pRmDmaCaps;
+} NV_ALIGN(4) NvRmDmaGetCapabilities_in;
+
+typedef struct NvRmDmaGetCapabilities_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDmaGetCapabilities_inout;
+
+typedef struct NvRmDmaGetCapabilities_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDmaGetCapabilities_out;
+
+typedef struct NvRmDmaGetCapabilities_params_t
+{
+ NvRmDmaGetCapabilities_in in;
+ NvRmDmaGetCapabilities_inout inout;
+ NvRmDmaGetCapabilities_out out;
+} NvRmDmaGetCapabilities_params;
+
+static NvError NvRmDmaIsDmaTransferCompletes_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Unmarshal the request, query the DMA driver, and deposit the NvBool
+     * answer in the out-record that sits past the inout section of the
+     * caller's output buffer. */
+    NvRmDmaIsDmaTransferCompletes_in *args = (NvRmDmaIsDmaTransferCompletes_in *)InBuffer;
+    NvRmDmaIsDmaTransferCompletes_out *result = (NvRmDmaIsDmaTransferCompletes_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmDmaIsDmaTransferCompletes_params, out)
+         - OFFSET(NvRmDmaIsDmaTransferCompletes_params, inout));
+
+    result->ret_ = NvRmDmaIsDmaTransferCompletes( args->hDma, args->IsFirstHalfBuffer );
+
+    return NvSuccess;
+}
+
+static NvError NvRmDmaGetTransferredCount_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Query the transferred count; both the NvError result and the count
+     * itself travel back in the out-record. */
+    NvRmDmaGetTransferredCount_in *args = (NvRmDmaGetTransferredCount_in *)InBuffer;
+    NvRmDmaGetTransferredCount_out *result = (NvRmDmaGetTransferredCount_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmDmaGetTransferredCount_params, out)
+         - OFFSET(NvRmDmaGetTransferredCount_params, inout));
+
+    result->ret_ = NvRmDmaGetTransferredCount( args->hDma, &result->pTransferCount, args->IsTransferStop );
+
+    return NvSuccess;
+}
+
+static NvError NvRmDmaAbort_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* NvRmDmaAbort returns nothing, so only the in-record is unpacked and
+     * no out-record pointer is computed. */
+    NvRmDmaAbort_in *args = (NvRmDmaAbort_in *)InBuffer;
+
+    NvRmDmaAbort( args->hDma );
+
+    return NvSuccess;
+}
+
+static NvError NvRmDmaStartDmaTransfer_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmDmaStartDmaTransfer. The client's semaphore,
+     * if supplied, is unmarshaled into a local reference before the call
+     * and released again on every exit path. */
+    NvError err_ = NvSuccess;
+    NvRmDmaStartDmaTransfer_in *p_in;
+    NvRmDmaStartDmaTransfer_out *p_out;
+    NvOsSemaphoreHandle AsynchSemaphoreId = NULL;
+
+    p_in = (NvRmDmaStartDmaTransfer_in *)InBuffer;
+    /* The out-record follows the inout-record; OutBuffer points at the
+     * inout section, so advance by the gap between the two. */
+    p_out = (NvRmDmaStartDmaTransfer_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDmaStartDmaTransfer_params, out) - OFFSET(NvRmDmaStartDmaTransfer_params, inout));
+
+    if( p_in->AsynchSemaphoreId )
+    {
+        /* Convert the caller's semaphore handle into one usable here; a
+         * failed unmarshal is reported as a bad parameter. */
+        err_ = NvOsSemaphoreUnmarshal( p_in->AsynchSemaphoreId, &AsynchSemaphoreId );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmDmaStartDmaTransfer( p_in->hDma, &p_in->pClientBuffer, p_in->DmaDirection, p_in->WaitTimeoutInMilliSecond, AsynchSemaphoreId );
+
+clean:
+    /* Drop the local semaphore reference (destroying NULL is harmless).
+     * NOTE(review): this assumes NvRmDmaStartDmaTransfer takes its own
+     * reference when it queues an asynchronous transfer — confirm. */
+    NvOsSemaphoreDestroy( AsynchSemaphoreId );
+    return err_;
+}
+
+static NvError NvRmDmaFree_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* NvRmDmaFree returns nothing, so only the in-record is needed. */
+    NvRmDmaFree_in *args = (NvRmDmaFree_in *)InBuffer;
+
+    NvRmDmaFree( args->hDma );
+
+    return NvSuccess;
+}
+
+static NvError NvRmDmaAllocate_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Allocate a DMA channel and hand the new handle back in the
+     * out-record alongside the NvError result. */
+    NvRmDmaAllocate_in *args = (NvRmDmaAllocate_in *)InBuffer;
+    NvRmDmaAllocate_out *result = (NvRmDmaAllocate_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmDmaAllocate_params, out)
+         - OFFSET(NvRmDmaAllocate_params, inout));
+
+    result->ret_ = NvRmDmaAllocate( args->hRmDevice, &result->phDma, args->Enable32bitSwap, args->Priority, args->DmaRequestorModuleId, args->DmaRequestorInstanceId );
+
+    return NvSuccess;
+}
+
+static NvError NvRmDmaGetCapabilities_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmDmaGetCapabilities. */
+    NvError err_ = NvSuccess;
+    NvRmDmaGetCapabilities_in *p_in;
+    NvRmDmaGetCapabilities_out *p_out;
+
+    p_in = (NvRmDmaGetCapabilities_in *)InBuffer;
+    /* The out-record follows the inout-record; OutBuffer points at the
+     * inout section, so advance by the gap between the two. */
+    p_out = (NvRmDmaGetCapabilities_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDmaGetCapabilities_params, out) - OFFSET(NvRmDmaGetCapabilities_params, inout));
+
+    /* NOTE(review): the capabilities land in the *input* record — the
+     * in-struct stores pRmDmaCaps by value and its address is passed here;
+     * presumably the marshaling layer copies the in-record back to the
+     * client — confirm against the IDL runtime. */
+    p_out->ret_ = NvRmDmaGetCapabilities( p_in->hDevice, &p_in->pRmDmaCaps );
+
+    return err_;
+}
+
+NvError nvrm_dma_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_dma_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Route a marshaled nvrm_dma call to its per-function stub; unknown
+     * function ids are rejected. Cases listed in ascending id order. */
+    switch( function ) {
+    case 0:
+        return NvRmDmaGetCapabilities_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 1:
+        return NvRmDmaAllocate_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 2:
+        return NvRmDmaFree_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 3:
+        return NvRmDmaStartDmaTransfer_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 4:
+        return NvRmDmaAbort_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 5:
+        return NvRmDmaGetTransferredCount_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 6:
+        return NvRmDmaIsDmaTransferCompletes_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    default:
+        return NvError_BadParameter;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_gpio_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_gpio_dispatch.c
new file mode 100644
index 000000000000..d932c98db502
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_gpio_dispatch.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_gpio.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+typedef struct NvRmGpioGetIrqs_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmGpioPinHandle * pin;
+ NvU32 * Irq;
+ NvU32 pinCount;
+} NV_ALIGN(4) NvRmGpioGetIrqs_in;
+
+typedef struct NvRmGpioGetIrqs_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioGetIrqs_inout;
+
+typedef struct NvRmGpioGetIrqs_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmGpioGetIrqs_out;
+
+typedef struct NvRmGpioGetIrqs_params_t
+{
+ NvRmGpioGetIrqs_in in;
+ NvRmGpioGetIrqs_inout inout;
+ NvRmGpioGetIrqs_out out;
+} NvRmGpioGetIrqs_params;
+
+typedef struct NvRmGpioConfigPins_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+ NvRmGpioPinHandle * pin;
+ NvU32 pinCount;
+ NvRmGpioPinMode Mode;
+} NV_ALIGN(4) NvRmGpioConfigPins_in;
+
+typedef struct NvRmGpioConfigPins_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioConfigPins_inout;
+
+typedef struct NvRmGpioConfigPins_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmGpioConfigPins_out;
+
+typedef struct NvRmGpioConfigPins_params_t
+{
+ NvRmGpioConfigPins_in in;
+ NvRmGpioConfigPins_inout inout;
+ NvRmGpioConfigPins_out out;
+} NvRmGpioConfigPins_params;
+
+typedef struct NvRmGpioReadPins_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+ NvRmGpioPinHandle * pin;
+ NvRmGpioPinState * pPinState;
+ NvU32 pinCount;
+} NV_ALIGN(4) NvRmGpioReadPins_in;
+
+typedef struct NvRmGpioReadPins_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioReadPins_inout;
+
+typedef struct NvRmGpioReadPins_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioReadPins_out;
+
+typedef struct NvRmGpioReadPins_params_t
+{
+ NvRmGpioReadPins_in in;
+ NvRmGpioReadPins_inout inout;
+ NvRmGpioReadPins_out out;
+} NvRmGpioReadPins_params;
+
+typedef struct NvRmGpioWritePins_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+ NvRmGpioPinHandle * pin;
+ NvRmGpioPinState * pinState;
+ NvU32 pinCount;
+} NV_ALIGN(4) NvRmGpioWritePins_in;
+
+typedef struct NvRmGpioWritePins_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioWritePins_inout;
+
+typedef struct NvRmGpioWritePins_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioWritePins_out;
+
+typedef struct NvRmGpioWritePins_params_t
+{
+ NvRmGpioWritePins_in in;
+ NvRmGpioWritePins_inout inout;
+ NvRmGpioWritePins_out out;
+} NvRmGpioWritePins_params;
+
+typedef struct NvRmGpioReleasePinHandles_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+ NvRmGpioPinHandle * hPin;
+ NvU32 pinCount;
+} NV_ALIGN(4) NvRmGpioReleasePinHandles_in;
+
+typedef struct NvRmGpioReleasePinHandles_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioReleasePinHandles_inout;
+
+typedef struct NvRmGpioReleasePinHandles_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioReleasePinHandles_out;
+
+typedef struct NvRmGpioReleasePinHandles_params_t
+{
+ NvRmGpioReleasePinHandles_in in;
+ NvRmGpioReleasePinHandles_inout inout;
+ NvRmGpioReleasePinHandles_out out;
+} NvRmGpioReleasePinHandles_params;
+
+typedef struct NvRmGpioAcquirePinHandle_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+ NvU32 port;
+ NvU32 pin;
+} NV_ALIGN(4) NvRmGpioAcquirePinHandle_in;
+
+typedef struct NvRmGpioAcquirePinHandle_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioAcquirePinHandle_inout;
+
+typedef struct NvRmGpioAcquirePinHandle_out_t
+{
+ NvError ret_;
+ NvRmGpioPinHandle phPin;
+} NV_ALIGN(4) NvRmGpioAcquirePinHandle_out;
+
+typedef struct NvRmGpioAcquirePinHandle_params_t
+{
+ NvRmGpioAcquirePinHandle_in in;
+ NvRmGpioAcquirePinHandle_inout inout;
+ NvRmGpioAcquirePinHandle_out out;
+} NvRmGpioAcquirePinHandle_params;
+
+typedef struct NvRmGpioClose_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmGpioHandle hGpio;
+} NV_ALIGN(4) NvRmGpioClose_in;
+
+typedef struct NvRmGpioClose_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioClose_inout;
+
+typedef struct NvRmGpioClose_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioClose_out;
+
+typedef struct NvRmGpioClose_params_t
+{
+ NvRmGpioClose_in in;
+ NvRmGpioClose_inout inout;
+ NvRmGpioClose_out out;
+} NvRmGpioClose_params;
+
+typedef struct NvRmGpioOpen_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+} NV_ALIGN(4) NvRmGpioOpen_in;
+
+typedef struct NvRmGpioOpen_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGpioOpen_inout;
+
+typedef struct NvRmGpioOpen_out_t
+{
+ NvError ret_;
+ NvRmGpioHandle phGpio;
+} NV_ALIGN(4) NvRmGpioOpen_out;
+
+typedef struct NvRmGpioOpen_params_t
+{
+ NvRmGpioOpen_in in;
+ NvRmGpioOpen_inout inout;
+ NvRmGpioOpen_out out;
+} NvRmGpioOpen_params;
+
+static NvError NvRmGpioGetIrqs_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmGpioGetIrqs: copies the client's pin-handle
+     * array into local memory, queries the IRQ for each pin, and copies
+     * the resulting IRQ array back to the client. */
+    NvError err_ = NvSuccess;
+    NvRmGpioGetIrqs_in *p_in;
+    NvRmGpioGetIrqs_out *p_out;
+    NvRmGpioPinHandle *pin = NULL;
+    NvU32 *Irq = NULL;
+
+    p_in = (NvRmGpioGetIrqs_in *)InBuffer;
+    p_out = (NvRmGpioGetIrqs_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGpioGetIrqs_params, out) - OFFSET(NvRmGpioGetIrqs_params, inout));
+
+    /* pinCount is client-controlled: reject counts whose byte size would
+     * overflow NvU32 for either array, otherwise the undersized
+     * allocations below would let NvOsCopyIn/NvOsCopyOut overrun the
+     * heap. */
+    if( p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinHandle ) ||
+        p_in->pinCount > (NvU32)~0 / sizeof( NvU32 ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( p_in->pinCount && p_in->pin )
+    {
+        pin = (NvRmGpioPinHandle *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( !pin )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pin already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( pin, p_in->pin, p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+    if( p_in->pinCount && p_in->Irq )
+    {
+        /* Output-only array: allocated here, filled by the RM call. */
+        Irq = (NvU32 *)NvOsAlloc( p_in->pinCount * sizeof( NvU32 ) );
+        if( !Irq )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmGpioGetIrqs( p_in->hRmDevice, pin, Irq, p_in->pinCount );
+
+    if(p_in->Irq && Irq)
+    {
+        /* Return the computed IRQ numbers to the client's buffer. */
+        err_ = NvOsCopyOut( p_in->Irq, Irq, p_in->pinCount * sizeof( NvU32 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pin );
+    NvOsFree( Irq );
+    return err_;
+}
+
+static NvError NvRmGpioConfigPins_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmGpioConfigPins: copies in the client's pin
+     * array and applies the requested pin mode to every pin in it. */
+    NvError err_ = NvSuccess;
+    NvRmGpioConfigPins_in *p_in;
+    NvRmGpioConfigPins_out *p_out;
+    NvRmGpioPinHandle *pin = NULL;
+
+    p_in = (NvRmGpioConfigPins_in *)InBuffer;
+    p_out = (NvRmGpioConfigPins_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGpioConfigPins_params, out) - OFFSET(NvRmGpioConfigPins_params, inout));
+
+    /* pinCount is client-controlled: reject counts whose byte size would
+     * overflow NvU32, otherwise the undersized allocation below would let
+     * NvOsCopyIn overrun the heap. */
+    if( p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinHandle ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( p_in->pinCount && p_in->pin )
+    {
+        pin = (NvRmGpioPinHandle *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( !pin )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pin already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( pin, p_in->pin, p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmGpioConfigPins( p_in->hGpio, pin, p_in->pinCount, p_in->Mode );
+
+clean:
+    NvOsFree( pin );
+    return err_;
+}
+
+static NvError NvRmGpioReadPins_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmGpioReadPins: copies in the client's pin
+     * array, samples each pin, and copies the sampled states back out.
+     * NvRmGpioReadPins returns void, so no out-record is populated. */
+    NvError err_ = NvSuccess;
+    NvRmGpioReadPins_in *p_in;
+    NvRmGpioPinHandle *pin = NULL;
+    NvRmGpioPinState *pPinState = NULL;
+
+    p_in = (NvRmGpioReadPins_in *)InBuffer;
+
+    /* pinCount is client-controlled: reject counts whose byte size would
+     * overflow NvU32 for either array, otherwise the undersized
+     * allocations below would let NvOsCopyIn/NvOsCopyOut overrun the
+     * heap. */
+    if( p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinHandle ) ||
+        p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinState ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( p_in->pinCount && p_in->pin )
+    {
+        pin = (NvRmGpioPinHandle *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( !pin )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pin already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( pin, p_in->pin, p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+    if( p_in->pinCount && p_in->pPinState )
+    {
+        /* Output-only array: allocated here, filled by the RM call. */
+        pPinState = (NvRmGpioPinState *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinState ) );
+        if( !pPinState )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+
+    NvRmGpioReadPins( p_in->hGpio, pin, pPinState, p_in->pinCount );
+
+    if(p_in->pPinState && pPinState)
+    {
+        /* Return the sampled pin states to the client's buffer. */
+        err_ = NvOsCopyOut( p_in->pPinState, pPinState, p_in->pinCount * sizeof( NvRmGpioPinState ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pin );
+    NvOsFree( pPinState );
+    return err_;
+}
+
+static NvError NvRmGpioWritePins_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmGpioWritePins: copies in the client's pin and
+     * state arrays and drives each pin to the requested state.
+     * NvRmGpioWritePins returns void, so no out-record is populated. */
+    NvError err_ = NvSuccess;
+    NvRmGpioWritePins_in *p_in;
+    NvRmGpioPinHandle *pin = NULL;
+    NvRmGpioPinState *pinState = NULL;
+
+    p_in = (NvRmGpioWritePins_in *)InBuffer;
+
+    /* pinCount is client-controlled: reject counts whose byte size would
+     * overflow NvU32 for either array, otherwise the undersized
+     * allocations below would let NvOsCopyIn overrun the heap. */
+    if( p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinHandle ) ||
+        p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinState ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( p_in->pinCount && p_in->pin )
+    {
+        pin = (NvRmGpioPinHandle *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( !pin )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pin already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( pin, p_in->pin, p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+    if( p_in->pinCount && p_in->pinState )
+    {
+        pinState = (NvRmGpioPinState *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinState ) );
+        if( !pinState )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pinState already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( pinState, p_in->pinState, p_in->pinCount * sizeof( NvRmGpioPinState ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    NvRmGpioWritePins( p_in->hGpio, pin, pinState, p_in->pinCount );
+
+clean:
+    NvOsFree( pin );
+    NvOsFree( pinState );
+    return err_;
+}
+
+static NvError NvRmGpioReleasePinHandles_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmGpioReleasePinHandles: copies in the client's
+     * handle array and releases each handle. Returns void at the RM
+     * level, so no out-record is populated. */
+    NvError err_ = NvSuccess;
+    NvRmGpioReleasePinHandles_in *p_in;
+    NvRmGpioPinHandle *hPin = NULL;
+
+    p_in = (NvRmGpioReleasePinHandles_in *)InBuffer;
+
+    /* pinCount is client-controlled: reject counts whose byte size would
+     * overflow NvU32, otherwise the undersized allocation below would let
+     * NvOsCopyIn overrun the heap. */
+    if( p_in->pinCount > (NvU32)~0 / sizeof( NvRmGpioPinHandle ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( p_in->pinCount && p_in->hPin )
+    {
+        hPin = (NvRmGpioPinHandle *)NvOsAlloc( p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( !hPin )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->hPin already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( hPin, p_in->hPin, p_in->pinCount * sizeof( NvRmGpioPinHandle ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    NvRmGpioReleasePinHandles( p_in->hGpio, hPin, p_in->pinCount );
+
+clean:
+    NvOsFree( hPin );
+    return err_;
+}
+
+static NvError NvRmGpioAcquirePinHandle_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Acquire a handle for (port, pin) and marshal it into the
+     * out-record alongside the NvError result. */
+    NvRmGpioAcquirePinHandle_in *args = (NvRmGpioAcquirePinHandle_in *)InBuffer;
+    NvRmGpioAcquirePinHandle_out *result = (NvRmGpioAcquirePinHandle_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmGpioAcquirePinHandle_params, out)
+         - OFFSET(NvRmGpioAcquirePinHandle_params, inout));
+
+    result->ret_ = NvRmGpioAcquirePinHandle( args->hGpio, args->port, args->pin, &result->phPin );
+
+    return NvSuccess;
+}
+
+static NvError NvRmGpioClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* NvRmGpioClose returns nothing, so only the in-record is needed. */
+    NvRmGpioClose_in *args = (NvRmGpioClose_in *)InBuffer;
+
+    NvRmGpioClose( args->hGpio );
+
+    return NvSuccess;
+}
+
+static NvError NvRmGpioOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Open the GPIO controller and hand the new handle back in the
+     * out-record alongside the NvError result. */
+    NvRmGpioOpen_in *args = (NvRmGpioOpen_in *)InBuffer;
+    NvRmGpioOpen_out *result = (NvRmGpioOpen_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmGpioOpen_params, out)
+         - OFFSET(NvRmGpioOpen_params, inout));
+
+    result->ret_ = NvRmGpioOpen( args->hRmDevice, &result->phGpio );
+
+    return NvSuccess;
+}
+
+NvError nvrm_gpio_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_gpio_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Route a marshaled nvrm_gpio call to its per-function stub; unknown
+     * function ids are rejected. Cases listed in ascending id order. */
+    switch( function ) {
+    case 0:
+        return NvRmGpioOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 1:
+        return NvRmGpioClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 2:
+        return NvRmGpioAcquirePinHandle_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 3:
+        return NvRmGpioReleasePinHandles_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 4:
+        return NvRmGpioWritePins_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 5:
+        return NvRmGpioReadPins_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 6:
+        return NvRmGpioConfigPins_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 7:
+        return NvRmGpioGetIrqs_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    default:
+        return NvError_BadParameter;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_i2c_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_i2c_dispatch.c
new file mode 100644
index 000000000000..6e2672dff896
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_i2c_dispatch.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_i2c.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+typedef struct NvRmI2cTransaction_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmI2cHandle hI2c;
+ NvU32 I2cPinMap;
+ NvU32 WaitTimeoutInMilliSeconds;
+ NvU32 ClockSpeedKHz;
+ NvU8 * Data;
+ NvU32 DataLen;
+ NvRmI2cTransactionInfo * Transaction;
+ NvU32 NumOfTransactions;
+} NV_ALIGN(4) NvRmI2cTransaction_in;
+
+typedef struct NvRmI2cTransaction_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmI2cTransaction_inout;
+
+typedef struct NvRmI2cTransaction_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmI2cTransaction_out;
+
+typedef struct NvRmI2cTransaction_params_t
+{
+ NvRmI2cTransaction_in in;
+ NvRmI2cTransaction_inout inout;
+ NvRmI2cTransaction_out out;
+} NvRmI2cTransaction_params;
+
+typedef struct NvRmI2cClose_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmI2cHandle hI2c;
+} NV_ALIGN(4) NvRmI2cClose_in;
+
+typedef struct NvRmI2cClose_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmI2cClose_inout;
+
+typedef struct NvRmI2cClose_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmI2cClose_out;
+
+typedef struct NvRmI2cClose_params_t
+{
+ NvRmI2cClose_in in;
+ NvRmI2cClose_inout inout;
+ NvRmI2cClose_out out;
+} NvRmI2cClose_params;
+
+typedef struct NvRmI2cOpen_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 IoModule;
+ NvU32 instance;
+} NV_ALIGN(4) NvRmI2cOpen_in;
+
+typedef struct NvRmI2cOpen_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmI2cOpen_inout;
+
+typedef struct NvRmI2cOpen_out_t
+{
+ NvError ret_;
+ NvRmI2cHandle phI2c;
+} NV_ALIGN(4) NvRmI2cOpen_out;
+
+typedef struct NvRmI2cOpen_params_t
+{
+ NvRmI2cOpen_in in;
+ NvRmI2cOpen_inout inout;
+ NvRmI2cOpen_out out;
+} NvRmI2cOpen_params;
+
+static NvError NvRmI2cTransaction_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmI2cTransaction: copies in the client's data
+     * buffer and transaction descriptors, runs the I2C transaction, and
+     * copies the (possibly updated by reads) data buffer back out. */
+    NvError err_ = NvSuccess;
+    NvRmI2cTransaction_in *p_in;
+    NvRmI2cTransaction_out *p_out;
+    NvU8 *Data = NULL;
+    NvRmI2cTransactionInfo *Transaction = NULL;
+
+    p_in = (NvRmI2cTransaction_in *)InBuffer;
+    p_out = (NvRmI2cTransaction_out *)((NvU8 *)OutBuffer + OFFSET(NvRmI2cTransaction_params, out) - OFFSET(NvRmI2cTransaction_params, inout));
+
+    if( p_in->DataLen && p_in->Data )
+    {
+        /* DataLen * sizeof(NvU8) cannot overflow: sizeof(NvU8) == 1. */
+        Data = (NvU8 *)NvOsAlloc( p_in->DataLen * sizeof( NvU8 ) );
+        if( !Data )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->Data already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( Data, p_in->Data, p_in->DataLen * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+    if( p_in->NumOfTransactions && p_in->Transaction )
+    {
+        /* NumOfTransactions is client-controlled: reject counts whose
+         * byte size would overflow NvU32, otherwise the undersized
+         * allocation below would let NvOsCopyIn overrun the heap. */
+        if( p_in->NumOfTransactions > (NvU32)~0 / sizeof( NvRmI2cTransactionInfo ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        Transaction = (NvRmI2cTransactionInfo *)NvOsAlloc( p_in->NumOfTransactions * sizeof( NvRmI2cTransactionInfo ) );
+        if( !Transaction )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->Transaction already known non-NULL from the guard above. */
+        err_ = NvOsCopyIn( Transaction, p_in->Transaction, p_in->NumOfTransactions * sizeof( NvRmI2cTransactionInfo ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmI2cTransaction( p_in->hI2c, p_in->I2cPinMap, p_in->WaitTimeoutInMilliSeconds, p_in->ClockSpeedKHz, Data, p_in->DataLen, Transaction, p_in->NumOfTransactions );
+
+    if(p_in->Data && Data)
+    {
+        /* Return the data buffer (updated by read transactions) to the
+         * client. */
+        err_ = NvOsCopyOut( p_in->Data, Data, p_in->DataLen * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( Data );
+    NvOsFree( Transaction );
+    return err_;
+}
+
+static NvError NvRmI2cClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* NvRmI2cClose returns nothing, so only the in-record is needed. */
+    NvRmI2cClose_in *args = (NvRmI2cClose_in *)InBuffer;
+
+    NvRmI2cClose( args->hI2c );
+
+    return NvSuccess;
+}
+
+static NvError NvRmI2cOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Open the requested I2C controller instance and hand the new handle
+     * back in the out-record alongside the NvError result. */
+    NvRmI2cOpen_in *args = (NvRmI2cOpen_in *)InBuffer;
+    NvRmI2cOpen_out *result = (NvRmI2cOpen_out *)
+        ((NvU8 *)OutBuffer
+         + OFFSET(NvRmI2cOpen_params, out)
+         - OFFSET(NvRmI2cOpen_params, inout));
+
+    result->ret_ = NvRmI2cOpen( args->hDevice, args->IoModule, args->instance, &result->phI2c );
+
+    return NvSuccess;
+}
+
+NvError nvrm_i2c_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_i2c_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Route a marshaled nvrm_i2c call to its per-function stub; unknown
+     * function ids are rejected. Cases listed in ascending id order. */
+    switch( function ) {
+    case 0:
+        return NvRmI2cOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 1:
+        return NvRmI2cClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 2:
+        return NvRmI2cTransaction_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    default:
+        return NvError_BadParameter;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_init_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_init_dispatch.c
new file mode 100644
index 000000000000..1c438a100957
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_init_dispatch.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_init.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e)) /* offsetof() equivalent used by the generated marshaling stubs below */
+
+
+typedef struct NvRmClose_in_t /* marshaled request for NvRmClose */
+{
+ NvU32 package_; /* package id written by the client-side stub */
+ NvU32 function_; /* function id within the package */
+ NvRmDeviceHandle hDevice;
+} NV_ALIGN(4) NvRmClose_in;
+
+typedef struct NvRmClose_inout_t /* no inout arguments; dummy_ keeps the struct non-empty */
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmClose_inout;
+
+typedef struct NvRmClose_out_t /* no out arguments */
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmClose_out;
+
+typedef struct NvRmClose_params_t /* full wire layout: in, then inout, then out */
+{
+ NvRmClose_in in;
+ NvRmClose_inout inout;
+ NvRmClose_out out;
+} NvRmClose_params;
+
+typedef struct NvRmOpenNew_in_t /* marshaled request for NvRmOpenNew (no arguments beyond the ids) */
+{
+ NvU32 package_;
+ NvU32 function_;
+} NV_ALIGN(4) NvRmOpenNew_in;
+
+typedef struct NvRmOpenNew_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOpenNew_inout;
+
+typedef struct NvRmOpenNew_out_t /* carries the NvError result plus the returned device handle */
+{
+ NvError ret_;
+ NvRmDeviceHandle pHandle;
+} NV_ALIGN(4) NvRmOpenNew_out;
+
+typedef struct NvRmOpenNew_params_t
+{
+ NvRmOpenNew_in in;
+ NvRmOpenNew_inout inout;
+ NvRmOpenNew_out out;
+} NvRmOpenNew_params;
+
+typedef struct NvRmInit_in_t /* marshaled request for NvRmInit */
+{
+ NvU32 package_;
+ NvU32 function_;
+} NV_ALIGN(4) NvRmInit_in;
+
+typedef struct NvRmInit_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmInit_inout;
+
+typedef struct NvRmInit_out_t /* NvRmInit returns void, so only the handle comes back */
+{
+ NvRmDeviceHandle pHandle;
+} NV_ALIGN(4) NvRmInit_out;
+
+typedef struct NvRmInit_params_t
+{
+ NvRmInit_in in;
+ NvRmInit_inout inout;
+ NvRmInit_out out;
+} NvRmInit_params;
+
+typedef struct NvRmOpen_in_t /* marshaled request for NvRmOpen */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvU32 DeviceId;
+} NV_ALIGN(4) NvRmOpen_in;
+
+typedef struct NvRmOpen_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOpen_inout;
+
+typedef struct NvRmOpen_out_t
+{
+ NvError ret_;
+ NvRmDeviceHandle pHandle;
+} NV_ALIGN(4) NvRmOpen_out;
+
+typedef struct NvRmOpen_params_t
+{
+ NvRmOpen_in in;
+ NvRmOpen_inout inout;
+ NvRmOpen_out out;
+} NvRmOpen_params;
+
+static NvError NvRmClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: forwards the handle to NvRmClose; no out parameters */
+{
+ NvError err_ = NvSuccess;
+ NvRmClose_in *p_in;
+
+ p_in = (NvRmClose_in *)InBuffer;
+
+
+ NvRmClose( p_in->hDevice ); /* void call: always reports NvSuccess back to the dispatcher */
+
+ return err_;
+}
+
+static NvError NvRmOpenNew_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: creates a new RM device handle and returns it in the out block */
+{
+ NvError err_ = NvSuccess;
+ NvRmOpenNew_in *p_in;
+ NvRmOpenNew_out *p_out;
+
+ p_in = (NvRmOpenNew_in *)InBuffer; /* p_in carries only the package/function ids; no arguments to read */
+ p_out = (NvRmOpenNew_out *)((NvU8 *)OutBuffer + OFFSET(NvRmOpenNew_params, out) - OFFSET(NvRmOpenNew_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmOpenNew( &p_out->pHandle );
+
+ return err_;
+}
+
+static NvError NvRmInit_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: NvRmInit returns void, so only the handle is marshaled out */
+{
+ NvError err_ = NvSuccess;
+ NvRmInit_in *p_in;
+ NvRmInit_out *p_out;
+
+ p_in = (NvRmInit_in *)InBuffer; /* p_in carries only the package/function ids; no arguments to read */
+ p_out = (NvRmInit_out *)((NvU8 *)OutBuffer + OFFSET(NvRmInit_params, out) - OFFSET(NvRmInit_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ NvRmInit( &p_out->pHandle );
+
+ return err_;
+}
+
+static NvError NvRmOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: opens the RM device identified by DeviceId */
+{
+ NvError err_ = NvSuccess;
+ NvRmOpen_in *p_in;
+ NvRmOpen_out *p_out;
+
+ p_in = (NvRmOpen_in *)InBuffer;
+ p_out = (NvRmOpen_out *)((NvU8 *)OutBuffer + OFFSET(NvRmOpen_params, out) - OFFSET(NvRmOpen_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmOpen( &p_out->pHandle, p_in->DeviceId ); /* result in ret_, handle in pHandle */
+
+ return err_;
+}
+
+NvError nvrm_init_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ); /* self-prototype: silences missing-prototype warnings for this extern entry point */
+NvError nvrm_init_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* package dispatcher: routes an nvrm_init function id to its marshaling stub */
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 3:
+ err_ = NvRmClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmOpenNew_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmInit_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter; /* unknown function id from the client */
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_interrupt_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_interrupt_dispatch.c
new file mode 100644
index 000000000000..007117333979
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_interrupt_dispatch.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_interrupt.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e)) /* offsetof() equivalent used by the generated marshaling stubs below */
+
+
+typedef struct NvRmGetIrqCountForLogicalInterrupt_in_t /* marshaled request: device handle + module id */
+{
+ NvU32 package_; /* package id written by the client-side stub */
+ NvU32 function_; /* function id within the package */
+ NvRmDeviceHandle hRmDevice;
+ NvRmModuleID ModuleID;
+} NV_ALIGN(4) NvRmGetIrqCountForLogicalInterrupt_in;
+
+typedef struct NvRmGetIrqCountForLogicalInterrupt_inout_t /* no inout arguments; dummy_ keeps the struct non-empty */
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGetIrqCountForLogicalInterrupt_inout;
+
+typedef struct NvRmGetIrqCountForLogicalInterrupt_out_t /* carries the NvU32 result */
+{
+ NvU32 ret_;
+} NV_ALIGN(4) NvRmGetIrqCountForLogicalInterrupt_out;
+
+typedef struct NvRmGetIrqCountForLogicalInterrupt_params_t /* full wire layout: in, then inout, then out */
+{
+ NvRmGetIrqCountForLogicalInterrupt_in in;
+ NvRmGetIrqCountForLogicalInterrupt_inout inout;
+ NvRmGetIrqCountForLogicalInterrupt_out out;
+} NvRmGetIrqCountForLogicalInterrupt_params;
+
+typedef struct NvRmGetIrqForLogicalInterrupt_in_t /* marshaled request: device handle + module id + interrupt index */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmModuleID ModuleID;
+ NvU32 Index;
+} NV_ALIGN(4) NvRmGetIrqForLogicalInterrupt_in;
+
+typedef struct NvRmGetIrqForLogicalInterrupt_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGetIrqForLogicalInterrupt_inout;
+
+typedef struct NvRmGetIrqForLogicalInterrupt_out_t
+{
+ NvU32 ret_;
+} NV_ALIGN(4) NvRmGetIrqForLogicalInterrupt_out;
+
+typedef struct NvRmGetIrqForLogicalInterrupt_params_t
+{
+ NvRmGetIrqForLogicalInterrupt_in in;
+ NvRmGetIrqForLogicalInterrupt_inout inout;
+ NvRmGetIrqForLogicalInterrupt_out out;
+} NvRmGetIrqForLogicalInterrupt_params;
+
+static NvError NvRmGetIrqCountForLogicalInterrupt_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: returns the IRQ count for a logical module id */
+{
+ NvError err_ = NvSuccess;
+ NvRmGetIrqCountForLogicalInterrupt_in *p_in;
+ NvRmGetIrqCountForLogicalInterrupt_out *p_out;
+
+ p_in = (NvRmGetIrqCountForLogicalInterrupt_in *)InBuffer;
+ p_out = (NvRmGetIrqCountForLogicalInterrupt_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetIrqCountForLogicalInterrupt_params, out) - OFFSET(NvRmGetIrqCountForLogicalInterrupt_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmGetIrqCountForLogicalInterrupt( p_in->hRmDevice, p_in->ModuleID );
+
+ return err_;
+}
+
+static NvError NvRmGetIrqForLogicalInterrupt_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: resolves (module id, index) to an IRQ number */
+{
+ NvError err_ = NvSuccess;
+ NvRmGetIrqForLogicalInterrupt_in *p_in;
+ NvRmGetIrqForLogicalInterrupt_out *p_out;
+
+ p_in = (NvRmGetIrqForLogicalInterrupt_in *)InBuffer;
+ p_out = (NvRmGetIrqForLogicalInterrupt_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetIrqForLogicalInterrupt_params, out) - OFFSET(NvRmGetIrqForLogicalInterrupt_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmGetIrqForLogicalInterrupt( p_in->hRmDevice, p_in->ModuleID, p_in->Index );
+
+ return err_;
+}
+
+NvError nvrm_interrupt_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ); /* self-prototype: silences missing-prototype warnings for this extern entry point */
+NvError nvrm_interrupt_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* package dispatcher: routes an nvrm_interrupt function id to its marshaling stub */
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 1:
+ err_ = NvRmGetIrqCountForLogicalInterrupt_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmGetIrqForLogicalInterrupt_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter; /* unknown function id from the client */
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_keylist_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_keylist_dispatch.c
new file mode 100644
index 000000000000..4e242392cd4a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_keylist_dispatch.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_keylist.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e)) /* offsetof() equivalent used by the generated marshaling stubs below */
+
+
+typedef struct NvRmSetKeyValuePair_in_t /* marshaled request: device handle + key id + value to store */
+{
+ NvU32 package_; /* package id written by the client-side stub */
+ NvU32 function_; /* function id within the package */
+ NvRmDeviceHandle hRm;
+ NvU32 KeyID;
+ NvU32 Value;
+} NV_ALIGN(4) NvRmSetKeyValuePair_in;
+
+typedef struct NvRmSetKeyValuePair_inout_t /* no inout arguments; dummy_ keeps the struct non-empty */
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmSetKeyValuePair_inout;
+
+typedef struct NvRmSetKeyValuePair_out_t /* carries the NvError result */
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmSetKeyValuePair_out;
+
+typedef struct NvRmSetKeyValuePair_params_t /* full wire layout: in, then inout, then out */
+{
+ NvRmSetKeyValuePair_in in;
+ NvRmSetKeyValuePair_inout inout;
+ NvRmSetKeyValuePair_out out;
+} NvRmSetKeyValuePair_params;
+
+typedef struct NvRmGetKeyValue_in_t /* marshaled request: device handle + key id to look up */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRm;
+ NvU32 KeyID;
+} NV_ALIGN(4) NvRmGetKeyValue_in;
+
+typedef struct NvRmGetKeyValue_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGetKeyValue_inout;
+
+typedef struct NvRmGetKeyValue_out_t /* carries the looked-up NvU32 value */
+{
+ NvU32 ret_;
+} NV_ALIGN(4) NvRmGetKeyValue_out;
+
+typedef struct NvRmGetKeyValue_params_t
+{
+ NvRmGetKeyValue_in in;
+ NvRmGetKeyValue_inout inout;
+ NvRmGetKeyValue_out out;
+} NvRmGetKeyValue_params;
+
+static NvError NvRmSetKeyValuePair_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: stores (KeyID, Value) via NvRmSetKeyValuePair */
+{
+ NvError err_ = NvSuccess;
+ NvRmSetKeyValuePair_in *p_in;
+ NvRmSetKeyValuePair_out *p_out;
+
+ p_in = (NvRmSetKeyValuePair_in *)InBuffer;
+ p_out = (NvRmSetKeyValuePair_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSetKeyValuePair_params, out) - OFFSET(NvRmSetKeyValuePair_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmSetKeyValuePair( p_in->hRm, p_in->KeyID, p_in->Value );
+
+ return err_;
+}
+
+static NvError NvRmGetKeyValue_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: fetches the value stored under KeyID */
+{
+ NvError err_ = NvSuccess;
+ NvRmGetKeyValue_in *p_in;
+ NvRmGetKeyValue_out *p_out;
+
+ p_in = (NvRmGetKeyValue_in *)InBuffer;
+ p_out = (NvRmGetKeyValue_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetKeyValue_params, out) - OFFSET(NvRmGetKeyValue_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = NvRmGetKeyValue( p_in->hRm, p_in->KeyID ); /* NvU32 value (not an NvError) travels back in ret_ */
+
+ return err_;
+}
+
+NvError nvrm_keylist_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ); /* self-prototype: silences missing-prototype warnings for this extern entry point */
+NvError nvrm_keylist_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* package dispatcher: routes an nvrm_keylist function id to its marshaling stub */
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 1:
+ err_ = NvRmSetKeyValuePair_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmGetKeyValue_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter; /* unknown function id from the client */
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memctrl_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memctrl_dispatch.c
new file mode 100644
index 000000000000..b2a4f77a8f67
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memctrl_dispatch.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_memctrl.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e)) /* offsetof() equivalent used by the generated marshaling stubs below */
+
+
+typedef struct NvRmCorePerfMonStop_in_t /* marshaled request; pCountList is the CLIENT's buffer pointer, never dereferenced directly in the stub */
+{
+ NvU32 package_; /* package id written by the client-side stub */
+ NvU32 function_; /* function id within the package */
+ NvRmDeviceHandle hRmDevice;
+ NvU32 * pCountList;
+} NV_ALIGN(4) NvRmCorePerfMonStop_in;
+
+typedef struct NvRmCorePerfMonStop_inout_t /* element count of pCountList; updated by the RM call */
+{
+ NvU32 pCountListSize;
+} NV_ALIGN(4) NvRmCorePerfMonStop_inout;
+
+typedef struct NvRmCorePerfMonStop_out_t
+{
+ NvError ret_;
+ NvU32 pTotalCycleCount;
+} NV_ALIGN(4) NvRmCorePerfMonStop_out;
+
+typedef struct NvRmCorePerfMonStop_params_t /* full wire layout: in, then inout, then out */
+{
+ NvRmCorePerfMonStop_in in;
+ NvRmCorePerfMonStop_inout inout;
+ NvRmCorePerfMonStop_out out;
+} NvRmCorePerfMonStop_params;
+
+typedef struct NvRmCorePerfMonStart_in_t /* marshaled request; pEventList is the CLIENT's buffer pointer */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvU32 * pEventList;
+} NV_ALIGN(4) NvRmCorePerfMonStart_in;
+
+typedef struct NvRmCorePerfMonStart_inout_t /* element count of pEventList; updated by the RM call */
+{
+ NvU32 pEventListSize;
+} NV_ALIGN(4) NvRmCorePerfMonStart_inout;
+
+typedef struct NvRmCorePerfMonStart_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmCorePerfMonStart_out;
+
+typedef struct NvRmCorePerfMonStart_params_t
+{
+ NvRmCorePerfMonStart_in in;
+ NvRmCorePerfMonStart_inout inout;
+ NvRmCorePerfMonStart_out out;
+} NvRmCorePerfMonStart_params;
+
+typedef struct ReadObsData_in_t /* marshaled request for the observability-data read */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle rm;
+ NvRmModuleID modId;
+ NvU32 start_index;
+ NvU32 length;
+} NV_ALIGN(4) ReadObsData_in;
+
+typedef struct ReadObsData_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) ReadObsData_inout;
+
+typedef struct ReadObsData_out_t
+{
+ NvError ret_;
+ NvU32 value;
+} NV_ALIGN(4) ReadObsData_out;
+
+typedef struct ReadObsData_params_t
+{
+ ReadObsData_in in;
+ ReadObsData_inout inout;
+ ReadObsData_out out;
+} ReadObsData_params;
+
+typedef struct McStat_Report_in_t /* marshaled memory-controller statistics to report */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvU32 client_id_0;
+ NvU32 client_0_cycles;
+ NvU32 client_id_1;
+ NvU32 client_1_cycles;
+ NvU32 llc_client_id;
+ NvU32 llc_client_clocks;
+ NvU32 llc_client_cycles;
+ NvU32 mc_clocks;
+} NV_ALIGN(4) McStat_Report_in;
+
+typedef struct McStat_Report_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) McStat_Report_inout;
+
+typedef struct McStat_Report_out_t /* no out arguments */
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) McStat_Report_out;
+
+typedef struct McStat_Report_params_t
+{
+ McStat_Report_in in;
+ McStat_Report_inout inout;
+ McStat_Report_out out;
+} McStat_Report_params;
+
+typedef struct McStat_Stop_in_t /* marshaled request to stop MC statistics collection */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle rm;
+} NV_ALIGN(4) McStat_Stop_in;
+
+typedef struct McStat_Stop_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) McStat_Stop_inout;
+
+typedef struct McStat_Stop_out_t /* five counter values returned by McStat_Stop */
+{
+ NvU32 client_0_cycles;
+ NvU32 client_1_cycles;
+ NvU32 llc_client_cycles;
+ NvU32 llc_client_clocks;
+ NvU32 mc_clocks;
+} NV_ALIGN(4) McStat_Stop_out;
+
+typedef struct McStat_Stop_params_t
+{
+ McStat_Stop_in in;
+ McStat_Stop_inout inout;
+ McStat_Stop_out out;
+} McStat_Stop_params;
+
+typedef struct McStat_Start_in_t /* marshaled request to start MC statistics for up to three clients */
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle rm;
+ NvU32 client_id_0;
+ NvU32 client_id_1;
+ NvU32 llc_client_id;
+} NV_ALIGN(4) McStat_Start_in;
+
+typedef struct McStat_Start_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) McStat_Start_inout;
+
+typedef struct McStat_Start_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) McStat_Start_out;
+
+typedef struct McStat_Start_params_t
+{
+ McStat_Start_in in;
+ McStat_Start_inout inout;
+ McStat_Start_out out;
+} McStat_Start_params;
+
+static NvError NvRmCorePerfMonStop_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: allocates a kernel-side count buffer, calls the RM, then copies results back to the client */
+{
+ NvError err_ = NvSuccess;
+ NvRmCorePerfMonStop_in *p_in;
+ NvRmCorePerfMonStop_inout *p_inout;
+ NvRmCorePerfMonStop_out *p_out;
+ NvRmCorePerfMonStop_inout inout;
+ NvU32 *pCountList = NULL;
+
+ p_in = (NvRmCorePerfMonStop_in *)InBuffer;
+ p_inout = (NvRmCorePerfMonStop_inout *)((NvU8 *)InBuffer + OFFSET(NvRmCorePerfMonStop_params, inout));
+ p_out = (NvRmCorePerfMonStop_out *)((NvU8 *)OutBuffer + OFFSET(NvRmCorePerfMonStop_params, out) - OFFSET(NvRmCorePerfMonStop_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+ (void)inout;
+ inout.pCountListSize = p_inout->pCountListSize; /* local copy: the RM updates the size in place */
+ if( p_inout->pCountListSize && p_in->pCountList )
+ {
+ pCountList = (NvU32 *)NvOsAlloc( p_inout->pCountListSize * sizeof( NvU32 ) ); /* NOTE(review): client-controlled element count; the multiplication could wrap on 32-bit -- confirm upstream validation */
+ if( !pCountList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmCorePerfMonStop( p_in->hRmDevice, &inout.pCountListSize, pCountList, &p_out->pTotalCycleCount );
+
+
+ p_inout = (NvRmCorePerfMonStop_inout *)OutBuffer; /* inout results are written back at the very start of OutBuffer */
+ p_inout->pCountListSize = inout.pCountListSize;
+ if(p_in->pCountList && pCountList)
+ {
+ err_ = NvOsCopyOut( p_in->pCountList, pCountList, p_inout->pCountListSize * sizeof( NvU32 ) ); /* NOTE(review): copy length is the RM-updated size; if the RM can report more elements than the client supplied, this reads past pCountList -- confirm the RM contract */
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter; /* normalize copy-out failure to BadParameter, matching the other stubs */
+ }
+ }
+clean:
+ NvOsFree( pCountList );
+ return err_;
+}
+
+static NvError NvRmCorePerfMonStart_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: copies the client's event list into a kernel buffer, then starts core perf monitoring */
+{
+ NvError err_ = NvSuccess;
+ NvRmCorePerfMonStart_in *p_in;
+ NvRmCorePerfMonStart_inout *p_inout;
+ NvRmCorePerfMonStart_out *p_out;
+ NvRmCorePerfMonStart_inout inout;
+ NvU32 *pEventList = NULL;
+
+ p_in = (NvRmCorePerfMonStart_in *)InBuffer;
+ p_inout = (NvRmCorePerfMonStart_inout *)((NvU8 *)InBuffer + OFFSET(NvRmCorePerfMonStart_params, inout));
+ p_out = (NvRmCorePerfMonStart_out *)((NvU8 *)OutBuffer + OFFSET(NvRmCorePerfMonStart_params, out) - OFFSET(NvRmCorePerfMonStart_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+ (void)inout;
+ inout.pEventListSize = p_inout->pEventListSize; /* local copy: the RM updates the size in place */
+ if( p_inout->pEventListSize && p_in->pEventList )
+ {
+ pEventList = (NvU32 *)NvOsAlloc( p_inout->pEventListSize * sizeof( NvU32 ) ); /* NOTE(review): client-controlled element count; the multiplication could wrap on 32-bit -- confirm upstream validation */
+ if( !pEventList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->pEventList ) /* redundant: already guaranteed non-NULL by the enclosing condition (generated code) */
+ {
+ err_ = NvOsCopyIn( pEventList, p_in->pEventList, p_inout->pEventListSize * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter; /* normalize copy-in failure to BadParameter, matching the other stubs */
+ goto clean;
+ }
+ }
+ }
+
+ p_out->ret_ = NvRmCorePerfMonStart( p_in->hRmDevice, &inout.pEventListSize, pEventList );
+
+
+ p_inout = (NvRmCorePerfMonStart_inout *)OutBuffer; /* inout results are written back at the very start of OutBuffer */
+ p_inout->pEventListSize = inout.pEventListSize;
+clean:
+ NvOsFree( pEventList );
+ return err_;
+}
+
+static NvError ReadObsData_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: reads one observability value for (modId, start_index, length) */
+{
+ NvError err_ = NvSuccess;
+ ReadObsData_in *p_in;
+ ReadObsData_out *p_out;
+
+ p_in = (ReadObsData_in *)InBuffer;
+ p_out = (ReadObsData_out *)((NvU8 *)OutBuffer + OFFSET(ReadObsData_params, out) - OFFSET(ReadObsData_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ p_out->ret_ = ReadObsData( p_in->rm, p_in->modId, p_in->start_index, p_in->length, &p_out->value );
+
+ return err_;
+}
+
+static NvError McStat_Report_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: forwards eight counter values to McStat_Report; no out parameters */
+{
+ NvError err_ = NvSuccess;
+ McStat_Report_in *p_in;
+
+ p_in = (McStat_Report_in *)InBuffer;
+
+
+ McStat_Report( p_in->client_id_0, p_in->client_0_cycles, p_in->client_id_1, p_in->client_1_cycles, p_in->llc_client_id, p_in->llc_client_clocks, p_in->llc_client_cycles, p_in->mc_clocks ); /* void call: always reports NvSuccess */
+
+ return err_;
+}
+
+static NvError McStat_Stop_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: stops MC statistics collection and returns the five counters in the out block */
+{
+ NvError err_ = NvSuccess;
+ McStat_Stop_in *p_in;
+ McStat_Stop_out *p_out;
+
+ p_in = (McStat_Stop_in *)InBuffer;
+ p_out = (McStat_Stop_out *)((NvU8 *)OutBuffer + OFFSET(McStat_Stop_params, out) - OFFSET(McStat_Stop_params, inout)); /* OutBuffer starts at the inout member, hence the offset bias */
+
+
+ McStat_Stop( p_in->rm, &p_out->client_0_cycles, &p_out->client_1_cycles, &p_out->llc_client_cycles, &p_out->llc_client_clocks, &p_out->mc_clocks ); /* void call: results travel only through the out block */
+
+ return err_;
+}
+
+static NvError McStat_Start_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* generated marshaling stub: starts MC statistics collection for the given client ids; no out parameters */
+{
+ NvError err_ = NvSuccess;
+ McStat_Start_in *p_in;
+
+ p_in = (McStat_Start_in *)InBuffer;
+
+
+ McStat_Start( p_in->rm, p_in->client_id_0, p_in->client_id_1, p_in->llc_client_id ); /* void call: always reports NvSuccess */
+
+ return err_;
+}
+
+NvError nvrm_memctrl_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ); /* self-prototype: silences missing-prototype warnings for this extern entry point */
+NvError nvrm_memctrl_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx ) /* package dispatcher: routes an nvrm_memctrl function id to its marshaling stub */
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 5:
+ err_ = NvRmCorePerfMonStop_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 4:
+ err_ = NvRmCorePerfMonStart_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = ReadObsData_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = McStat_Report_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = McStat_Stop_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = McStat_Start_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter; /* unknown function id from the client */
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memmgr_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memmgr_dispatch.c
new file mode 100644
index 000000000000..57b08df02223
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_memmgr_dispatch.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_memmgr.h"
+
/* Byte offset of member e within struct s (offsetof idiom).
 * NOTE(review): dereferencing a null struct pointer this way is technically
 * undefined behavior in ISO C; <stddef.h> offsetof() is the portable
 * spelling.  In practice every compiler this ships on accepts it. */
#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
/*
 * Marshaling records for the nvrm_memmgr IDL package.
 *
 * Each API gets three 4-byte-aligned sections — in (client to kernel),
 * inout, and out (kernel to client) — plus a *_params struct that fixes
 * their relative layout; the dispatchers below use OFFSET() arithmetic on
 * the *_params type to locate the out section.  `package_`/`function_`
 * carry the routing codes; `dummy_` pads sections with no payload.
 * NOTE(review): pointer-typed fields (e.g. hMems, Addrs, Heaps) are
 * client-side addresses — presumably user-space pointers, since the
 * dispatchers copy them with NvOsCopyIn/NvOsCopyOut rather than
 * dereferencing them directly.
 */
typedef struct NvRmMemGetStat_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemStat Stat;
} NV_ALIGN(4) NvRmMemGetStat_in;

typedef struct NvRmMemGetStat_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetStat_inout;

typedef struct NvRmMemGetStat_out_t
{
    NvError ret_;
    NvS32 Result;
} NV_ALIGN(4) NvRmMemGetStat_out;

typedef struct NvRmMemGetStat_params_t
{
    NvRmMemGetStat_in in;
    NvRmMemGetStat_inout inout;
    NvRmMemGetStat_out out;
} NvRmMemGetStat_params;

typedef struct NvRmMemHandleFromId_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvU32 id;
} NV_ALIGN(4) NvRmMemHandleFromId_in;

typedef struct NvRmMemHandleFromId_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandleFromId_inout;

typedef struct NvRmMemHandleFromId_out_t
{
    NvError ret_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemHandleFromId_out;

typedef struct NvRmMemHandleFromId_params_t
{
    NvRmMemHandleFromId_in in;
    NvRmMemHandleFromId_inout inout;
    NvRmMemHandleFromId_out out;
} NvRmMemHandleFromId_params;

typedef struct NvRmMemGetId_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemGetId_in;

typedef struct NvRmMemGetId_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetId_inout;

typedef struct NvRmMemGetId_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemGetId_out;

typedef struct NvRmMemGetId_params_t
{
    NvRmMemGetId_in in;
    NvRmMemGetId_inout inout;
    NvRmMemGetId_out out;
} NvRmMemGetId_params;

typedef struct NvRmMemGetHeapType_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemGetHeapType_in;

typedef struct NvRmMemGetHeapType_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetHeapType_inout;

typedef struct NvRmMemGetHeapType_out_t
{
    NvRmHeap ret_;
    NvU32 BasePhysAddr;
} NV_ALIGN(4) NvRmMemGetHeapType_out;

typedef struct NvRmMemGetHeapType_params_t
{
    NvRmMemGetHeapType_in in;
    NvRmMemGetHeapType_inout inout;
    NvRmMemGetHeapType_out out;
} NvRmMemGetHeapType_params;

typedef struct NvRmMemGetCacheLineSize_in_t
{
    NvU32 package_;
    NvU32 function_;
} NV_ALIGN(4) NvRmMemGetCacheLineSize_in;

typedef struct NvRmMemGetCacheLineSize_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetCacheLineSize_inout;

typedef struct NvRmMemGetCacheLineSize_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemGetCacheLineSize_out;

typedef struct NvRmMemGetCacheLineSize_params_t
{
    NvRmMemGetCacheLineSize_in in;
    NvRmMemGetCacheLineSize_inout inout;
    NvRmMemGetCacheLineSize_out out;
} NvRmMemGetCacheLineSize_params;

typedef struct NvRmMemGetAlignment_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemGetAlignment_in;

typedef struct NvRmMemGetAlignment_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetAlignment_inout;

typedef struct NvRmMemGetAlignment_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemGetAlignment_out;

typedef struct NvRmMemGetAlignment_params_t
{
    NvRmMemGetAlignment_in in;
    NvRmMemGetAlignment_inout inout;
    NvRmMemGetAlignment_out out;
} NvRmMemGetAlignment_params;

typedef struct NvRmMemGetSize_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemGetSize_in;

typedef struct NvRmMemGetSize_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetSize_inout;

typedef struct NvRmMemGetSize_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemGetSize_out;

typedef struct NvRmMemGetSize_params_t
{
    NvRmMemGetSize_in in;
    NvRmMemGetSize_inout inout;
    NvRmMemGetSize_out out;
} NvRmMemGetSize_params;

typedef struct NvRmMemMove_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hDstMem;
    NvU32 DstOffset;
    NvRmMemHandle hSrcMem;
    NvU32 SrcOffset;
    NvU32 Size;
} NV_ALIGN(4) NvRmMemMove_in;

typedef struct NvRmMemMove_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemMove_inout;

typedef struct NvRmMemMove_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemMove_out;

typedef struct NvRmMemMove_params_t
{
    NvRmMemMove_in in;
    NvRmMemMove_inout inout;
    NvRmMemMove_out out;
} NvRmMemMove_params;

typedef struct NvRmMemUnpinMult_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle * hMems;   /* client-side array of Count handles */
    NvU32 Count;
} NV_ALIGN(4) NvRmMemUnpinMult_in;

typedef struct NvRmMemUnpinMult_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemUnpinMult_inout;

typedef struct NvRmMemUnpinMult_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemUnpinMult_out;

typedef struct NvRmMemUnpinMult_params_t
{
    NvRmMemUnpinMult_in in;
    NvRmMemUnpinMult_inout inout;
    NvRmMemUnpinMult_out out;
} NvRmMemUnpinMult_params;

typedef struct NvRmMemUnpin_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemUnpin_in;

typedef struct NvRmMemUnpin_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemUnpin_inout;

typedef struct NvRmMemUnpin_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemUnpin_out;

typedef struct NvRmMemUnpin_params_t
{
    NvRmMemUnpin_in in;
    NvRmMemUnpin_inout inout;
    NvRmMemUnpin_out out;
} NvRmMemUnpin_params;

typedef struct NvRmMemGetAddress_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
    NvU32 Offset;
} NV_ALIGN(4) NvRmMemGetAddress_in;

typedef struct NvRmMemGetAddress_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemGetAddress_inout;

typedef struct NvRmMemGetAddress_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemGetAddress_out;

typedef struct NvRmMemGetAddress_params_t
{
    NvRmMemGetAddress_in in;
    NvRmMemGetAddress_inout inout;
    NvRmMemGetAddress_out out;
} NvRmMemGetAddress_params;

typedef struct NvRmMemPinMult_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle * hMems;   /* client-side array of Count handles */
    NvU32 * Addrs;           /* client-side array receiving Count addresses */
    NvU32 Count;
} NV_ALIGN(4) NvRmMemPinMult_in;

typedef struct NvRmMemPinMult_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemPinMult_inout;

typedef struct NvRmMemPinMult_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemPinMult_out;

typedef struct NvRmMemPinMult_params_t
{
    NvRmMemPinMult_in in;
    NvRmMemPinMult_inout inout;
    NvRmMemPinMult_out out;
} NvRmMemPinMult_params;

typedef struct NvRmMemPin_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemPin_in;

typedef struct NvRmMemPin_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemPin_inout;

typedef struct NvRmMemPin_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmMemPin_out;

typedef struct NvRmMemPin_params_t
{
    NvRmMemPin_in in;
    NvRmMemPin_inout inout;
    NvRmMemPin_out out;
} NvRmMemPin_params;

typedef struct NvRmMemAlloc_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
    NvRmHeap * Heaps;        /* client-side array of NumHeaps heap ids */
    NvU32 NumHeaps;
    NvU32 Alignment;
    NvOsMemAttribute Coherency;
} NV_ALIGN(4) NvRmMemAlloc_in;

typedef struct NvRmMemAlloc_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemAlloc_inout;

typedef struct NvRmMemAlloc_out_t
{
    NvError ret_;
} NV_ALIGN(4) NvRmMemAlloc_out;

typedef struct NvRmMemAlloc_params_t
{
    NvRmMemAlloc_in in;
    NvRmMemAlloc_inout inout;
    NvRmMemAlloc_out out;
} NvRmMemAlloc_params;

typedef struct NvRmMemHandleFree_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemHandleFree_in;

typedef struct NvRmMemHandleFree_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandleFree_inout;

typedef struct NvRmMemHandleFree_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandleFree_out;

typedef struct NvRmMemHandleFree_params_t
{
    NvRmMemHandleFree_in in;
    NvRmMemHandleFree_inout inout;
    NvRmMemHandleFree_out out;
} NvRmMemHandleFree_params;

typedef struct NvRmMemHandlePreserveHandle_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmMemHandle hMem;
} NV_ALIGN(4) NvRmMemHandlePreserveHandle_in;

typedef struct NvRmMemHandlePreserveHandle_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandlePreserveHandle_inout;

typedef struct NvRmMemHandlePreserveHandle_out_t
{
    NvError ret_;
    NvU32 Key;
} NV_ALIGN(4) NvRmMemHandlePreserveHandle_out;

typedef struct NvRmMemHandlePreserveHandle_params_t
{
    NvRmMemHandlePreserveHandle_in in;
    NvRmMemHandlePreserveHandle_inout inout;
    NvRmMemHandlePreserveHandle_out out;
} NvRmMemHandlePreserveHandle_params;

typedef struct NvRmMemHandleClaimPreservedHandle_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDevice;
    NvU32 Key;
} NV_ALIGN(4) NvRmMemHandleClaimPreservedHandle_in;

typedef struct NvRmMemHandleClaimPreservedHandle_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandleClaimPreservedHandle_inout;

typedef struct NvRmMemHandleClaimPreservedHandle_out_t
{
    NvError ret_;
    NvRmMemHandle phMem;
} NV_ALIGN(4) NvRmMemHandleClaimPreservedHandle_out;

typedef struct NvRmMemHandleClaimPreservedHandle_params_t
{
    NvRmMemHandleClaimPreservedHandle_in in;
    NvRmMemHandleClaimPreservedHandle_inout inout;
    NvRmMemHandleClaimPreservedHandle_out out;
} NvRmMemHandleClaimPreservedHandle_params;

typedef struct NvRmMemHandleCreate_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDevice;
    NvU32 Size;
} NV_ALIGN(4) NvRmMemHandleCreate_in;

typedef struct NvRmMemHandleCreate_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmMemHandleCreate_inout;

typedef struct NvRmMemHandleCreate_out_t
{
    NvError ret_;
    NvRmMemHandle phMem;
} NV_ALIGN(4) NvRmMemHandleCreate_out;

typedef struct NvRmMemHandleCreate_params_t
{
    NvRmMemHandleCreate_in in;
    NvRmMemHandleCreate_inout inout;
    NvRmMemHandleCreate_out out;
} NvRmMemHandleCreate_params;
+
+static NvError NvRmMemGetStat_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetStat_in *p_in;
+ NvRmMemGetStat_out *p_out;
+
+ p_in = (NvRmMemGetStat_in *)InBuffer;
+ p_out = (NvRmMemGetStat_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetStat_params, out) - OFFSET(NvRmMemGetStat_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetStat( p_in->Stat, &p_out->Result );
+
+ return err_;
+}
+
/* Dispatcher for NvRmMemHandleFromId: resolve a global memory id to a
 * handle and register the handle with the caller's reftrack context
 * (presumably so leaked handles can be reclaimed — see nvreftrack.h). */
static NvError NvRmMemHandleFromId_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
{
    NvError err_ = NvSuccess;
    NvRmMemHandleFromId_in *p_in;
    NvRmMemHandleFromId_out *p_out;
    NvRtObjRefHandle ref_hMem = 0;      /* pre-reserved reftrack slot */

    p_in = (NvRmMemHandleFromId_in *)InBuffer;
    /* OutBuffer addresses the inout section; step forward to out. */
    p_out = (NvRmMemHandleFromId_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemHandleFromId_params, out) - OFFSET(NvRmMemHandleFromId_params, inout));

    /* Reserve the object-ref slot first so the success path below cannot
     * fail after the handle has already been materialized. */
    err_ = NvRtAllocObjRef(Ctx, &ref_hMem);
    if (err_ != NvSuccess)
    {
        goto clean;
    }

    /* NOTE(review): no access-control check is visible here — any client
     * holding a valid id obtains the handle.  Confirm this is intended. */
    p_out->ret_ = NvRmMemHandleFromId( p_in->id, &p_out->hMem );

    if ( p_out->ret_ == NvSuccess )
    {
        /* Ownership of the slot transfers to the tracker; zero the local
         * so the cleanup path does not discard a stored ref. */
        NvRtStoreObjRef(Ctx, ref_hMem, NvRtObjType_NvRm_NvRmMemHandle, p_out->hMem);
        ref_hMem = 0;
    }
clean:
    if (ref_hMem) NvRtDiscardObjRef(Ctx, ref_hMem);
    return err_;
}
+
+static NvError NvRmMemGetId_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetId_in *p_in;
+ NvRmMemGetId_out *p_out;
+
+ p_in = (NvRmMemGetId_in *)InBuffer;
+ p_out = (NvRmMemGetId_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetId_params, out) - OFFSET(NvRmMemGetId_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetId( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemGetHeapType_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetHeapType_in *p_in;
+ NvRmMemGetHeapType_out *p_out;
+
+ p_in = (NvRmMemGetHeapType_in *)InBuffer;
+ p_out = (NvRmMemGetHeapType_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetHeapType_params, out) - OFFSET(NvRmMemGetHeapType_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetHeapType( p_in->hMem, &p_out->BasePhysAddr );
+
+ return err_;
+}
+
+static NvError NvRmMemGetCacheLineSize_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetCacheLineSize_out *p_out;
+ p_out = (NvRmMemGetCacheLineSize_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetCacheLineSize_params, out) - OFFSET(NvRmMemGetCacheLineSize_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetCacheLineSize( );
+
+ return err_;
+}
+
+static NvError NvRmMemGetAlignment_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetAlignment_in *p_in;
+ NvRmMemGetAlignment_out *p_out;
+
+ p_in = (NvRmMemGetAlignment_in *)InBuffer;
+ p_out = (NvRmMemGetAlignment_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetAlignment_params, out) - OFFSET(NvRmMemGetAlignment_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetAlignment( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemGetSize_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetSize_in *p_in;
+ NvRmMemGetSize_out *p_out;
+
+ p_in = (NvRmMemGetSize_in *)InBuffer;
+ p_out = (NvRmMemGetSize_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetSize_params, out) - OFFSET(NvRmMemGetSize_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetSize( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemMove_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemMove_in *p_in;
+
+ p_in = (NvRmMemMove_in *)InBuffer;
+
+
+ NvRmMemMove( p_in->hDstMem, p_in->DstOffset, p_in->hSrcMem, p_in->SrcOffset, p_in->Size );
+
+ return err_;
+}
+
+static NvError NvRmMemUnpinMult_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemUnpinMult_in *p_in;
+ NvRmMemHandle *hMems = NULL;
+
+ p_in = (NvRmMemUnpinMult_in *)InBuffer;
+
+ if( p_in->Count && p_in->hMems )
+ {
+ hMems = (NvRmMemHandle *)NvOsAlloc( p_in->Count * sizeof( NvRmMemHandle ) );
+ if( !hMems )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->hMems )
+ {
+ err_ = NvOsCopyIn( hMems, p_in->hMems, p_in->Count * sizeof( NvRmMemHandle ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ NvRmMemUnpinMult( hMems, p_in->Count );
+
+clean:
+ NvOsFree( hMems );
+ return err_;
+}
+
+static NvError NvRmMemUnpin_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemUnpin_in *p_in;
+
+ p_in = (NvRmMemUnpin_in *)InBuffer;
+
+
+ NvRmMemUnpin( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemGetAddress_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemGetAddress_in *p_in;
+ NvRmMemGetAddress_out *p_out;
+
+ p_in = (NvRmMemGetAddress_in *)InBuffer;
+ p_out = (NvRmMemGetAddress_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemGetAddress_params, out) - OFFSET(NvRmMemGetAddress_params, inout));
+
+
+ p_out->ret_ = NvRmMemGetAddress( p_in->hMem, p_in->Offset );
+
+ return err_;
+}
+
+static NvError NvRmMemPinMult_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemPinMult_in *p_in;
+ NvRmMemHandle *hMems = NULL;
+ NvU32 *Addrs = NULL;
+
+ p_in = (NvRmMemPinMult_in *)InBuffer;
+
+ if( p_in->Count && p_in->hMems )
+ {
+ hMems = (NvRmMemHandle *)NvOsAlloc( p_in->Count * sizeof( NvRmMemHandle ) );
+ if( !hMems )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->hMems )
+ {
+ err_ = NvOsCopyIn( hMems, p_in->hMems, p_in->Count * sizeof( NvRmMemHandle ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+ if( p_in->Count && p_in->Addrs )
+ {
+ Addrs = (NvU32 *)NvOsAlloc( p_in->Count * sizeof( NvU32 ) );
+ if( !Addrs )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ NvRmMemPinMult( hMems, Addrs, p_in->Count );
+
+ if(p_in->Addrs && Addrs)
+ {
+ err_ = NvOsCopyOut( p_in->Addrs, Addrs, p_in->Count * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( hMems );
+ NvOsFree( Addrs );
+ return err_;
+}
+
+static NvError NvRmMemPin_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemPin_in *p_in;
+ NvRmMemPin_out *p_out;
+
+ p_in = (NvRmMemPin_in *)InBuffer;
+ p_out = (NvRmMemPin_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemPin_params, out) - OFFSET(NvRmMemPin_params, inout));
+
+
+ p_out->ret_ = NvRmMemPin( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemAlloc_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemAlloc_in *p_in;
+ NvRmMemAlloc_out *p_out;
+ NvRmHeap *Heaps = NULL;
+
+ p_in = (NvRmMemAlloc_in *)InBuffer;
+ p_out = (NvRmMemAlloc_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemAlloc_params, out) - OFFSET(NvRmMemAlloc_params, inout));
+
+ if( p_in->NumHeaps && p_in->Heaps )
+ {
+ Heaps = (NvRmHeap *)NvOsAlloc( p_in->NumHeaps * sizeof( NvRmHeap ) );
+ if( !Heaps )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->Heaps )
+ {
+ err_ = NvOsCopyIn( Heaps, p_in->Heaps, p_in->NumHeaps * sizeof( NvRmHeap ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ p_out->ret_ = NvRmMemAlloc( p_in->hMem, Heaps, p_in->NumHeaps, p_in->Alignment, p_in->Coherency );
+
+clean:
+ NvOsFree( Heaps );
+ return err_;
+}
+
+static NvError NvRmMemHandleFree_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemHandleFree_in *p_in;
+
+ p_in = (NvRmMemHandleFree_in *)InBuffer;
+
+ if (p_in->hMem != NULL) NvRtFreeObjRef(Ctx, NvRtObjType_NvRm_NvRmMemHandle, p_in->hMem);
+
+ NvRmMemHandleFree( p_in->hMem );
+
+ return err_;
+}
+
+static NvError NvRmMemHandlePreserveHandle_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMemHandlePreserveHandle_in *p_in;
+ NvRmMemHandlePreserveHandle_out *p_out;
+
+ p_in = (NvRmMemHandlePreserveHandle_in *)InBuffer;
+ p_out = (NvRmMemHandlePreserveHandle_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemHandlePreserveHandle_params, out) - OFFSET(NvRmMemHandlePreserveHandle_params, inout));
+
+
+ p_out->ret_ = NvRmMemHandlePreserveHandle( p_in->hMem, &p_out->Key );
+
+ return err_;
+}
+
/* Dispatcher for NvRmMemHandleClaimPreservedHandle: redeem a preserve Key
 * for a memory handle and register the handle with the caller's reftrack
 * context (presumably so leaked handles can be reclaimed). */
static NvError NvRmMemHandleClaimPreservedHandle_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
{
    NvError err_ = NvSuccess;
    NvRmMemHandleClaimPreservedHandle_in *p_in;
    NvRmMemHandleClaimPreservedHandle_out *p_out;
    NvRtObjRefHandle ref_phMem = 0;     /* pre-reserved reftrack slot */

    p_in = (NvRmMemHandleClaimPreservedHandle_in *)InBuffer;
    /* OutBuffer addresses the inout section; step forward to out. */
    p_out = (NvRmMemHandleClaimPreservedHandle_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemHandleClaimPreservedHandle_params, out) - OFFSET(NvRmMemHandleClaimPreservedHandle_params, inout));

    /* Reserve the object-ref slot first so the success path below cannot
     * fail after the handle has already been claimed. */
    err_ = NvRtAllocObjRef(Ctx, &ref_phMem);
    if (err_ != NvSuccess)
    {
        goto clean;
    }

    p_out->ret_ = NvRmMemHandleClaimPreservedHandle( p_in->hDevice, p_in->Key, &p_out->phMem );

    if ( p_out->ret_ == NvSuccess )
    {
        /* Ownership of the slot transfers to the tracker; zero the local
         * so the cleanup path does not discard a stored ref. */
        NvRtStoreObjRef(Ctx, ref_phMem, NvRtObjType_NvRm_NvRmMemHandle, p_out->phMem);
        ref_phMem = 0;
    }
clean:
    if (ref_phMem) NvRtDiscardObjRef(Ctx, ref_phMem);
    return err_;
}
+
/* Dispatcher for NvRmMemHandleCreate: create a new (unallocated) memory
 * handle of the requested Size and register it with the caller's reftrack
 * context (presumably so leaked handles can be reclaimed). */
static NvError NvRmMemHandleCreate_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
{
    NvError err_ = NvSuccess;
    NvRmMemHandleCreate_in *p_in;
    NvRmMemHandleCreate_out *p_out;
    NvRtObjRefHandle ref_phMem = 0;     /* pre-reserved reftrack slot */

    p_in = (NvRmMemHandleCreate_in *)InBuffer;
    /* OutBuffer addresses the inout section; step forward to out. */
    p_out = (NvRmMemHandleCreate_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMemHandleCreate_params, out) - OFFSET(NvRmMemHandleCreate_params, inout));

    /* Reserve the object-ref slot first so the success path below cannot
     * fail after the handle has already been created. */
    err_ = NvRtAllocObjRef(Ctx, &ref_phMem);
    if (err_ != NvSuccess)
    {
        goto clean;
    }

    p_out->ret_ = NvRmMemHandleCreate( p_in->hDevice, &p_out->phMem, p_in->Size );

    if ( p_out->ret_ == NvSuccess )
    {
        /* Ownership of the slot transfers to the tracker; zero the local
         * so the cleanup path does not discard a stored ref. */
        NvRtStoreObjRef(Ctx, ref_phMem, NvRtObjType_NvRm_NvRmMemHandle, p_out->phMem);
        ref_phMem = 0;
    }
clean:
    if (ref_phMem) NvRtDiscardObjRef(Ctx, ref_phMem);
    return err_;
}
+
+NvError nvrm_memmgr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_memmgr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 17:
+ err_ = NvRmMemGetStat_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 16:
+ err_ = NvRmMemHandleFromId_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 15:
+ err_ = NvRmMemGetId_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 14:
+ err_ = NvRmMemGetHeapType_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 13:
+ err_ = NvRmMemGetCacheLineSize_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 12:
+ err_ = NvRmMemGetAlignment_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 11:
+ err_ = NvRmMemGetSize_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 10:
+ err_ = NvRmMemMove_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 9:
+ err_ = NvRmMemUnpinMult_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 8:
+ err_ = NvRmMemUnpin_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 7:
+ err_ = NvRmMemGetAddress_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 6:
+ err_ = NvRmMemPinMult_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 5:
+ err_ = NvRmMemPin_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 4:
+ err_ = NvRmMemAlloc_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = NvRmMemHandleFree_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmMemHandlePreserveHandle_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmMemHandleClaimPreservedHandle_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmMemHandleCreate_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_module_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_module_dispatch.c
new file mode 100644
index 000000000000..49bb062a16ba
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_module_dispatch.c
@@ -0,0 +1,974 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_module.h"
+
/* Byte offset of member e within struct s (offsetof idiom).
 * NOTE(review): dereferencing a null struct pointer this way is technically
 * undefined behavior in ISO C; <stddef.h> offsetof() is the portable
 * spelling.  In practice every compiler this ships on accepts it. */
#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
/*
 * Marshaling records for the nvrm_module IDL package.
 *
 * Same layout convention as the other dispatch files: per-API in / inout /
 * out sections plus a *_params struct fixing their relative offsets for the
 * dispatchers' OFFSET() arithmetic.  `package_`/`function_` carry routing
 * codes; `dummy_` pads sections with no payload.  NOTE(review): pointer
 * fields (values, offsets, pBytes, pId, pCaps) are client-side addresses;
 * the dispatchers (not visible in this chunk) are expected to copy them,
 * not dereference them directly — confirm against the generated code.
 */
typedef struct NvRegw08_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle rm;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 offset;
    NvU8 data;
} NV_ALIGN(4) NvRegw08_in;

typedef struct NvRegw08_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegw08_inout;

typedef struct NvRegw08_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegw08_out;

typedef struct NvRegw08_params_t
{
    NvRegw08_in in;
    NvRegw08_inout inout;
    NvRegw08_out out;
} NvRegw08_params;

typedef struct NvRegr08_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 offset;
} NV_ALIGN(4) NvRegr08_in;

typedef struct NvRegr08_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegr08_inout;

typedef struct NvRegr08_out_t
{
    NvU8 ret_;
} NV_ALIGN(4) NvRegr08_out;

typedef struct NvRegr08_params_t
{
    NvRegr08_in in;
    NvRegr08_inout inout;
    NvRegr08_out out;
} NvRegr08_params;

typedef struct NvRegrb_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 num;
    NvU32 offset;
    NvU32 * values;
} NV_ALIGN(4) NvRegrb_in;

typedef struct NvRegrb_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegrb_inout;

typedef struct NvRegrb_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegrb_out;

typedef struct NvRegrb_params_t
{
    NvRegrb_in in;
    NvRegrb_inout inout;
    NvRegrb_out out;
} NvRegrb_params;

typedef struct NvRegwb_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 num;
    NvU32 offset;
    NvU32 * values;
} NV_ALIGN(4) NvRegwb_in;

typedef struct NvRegwb_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegwb_inout;

typedef struct NvRegwb_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegwb_out;

typedef struct NvRegwb_params_t
{
    NvRegwb_in in;
    NvRegwb_inout inout;
    NvRegwb_out out;
} NvRegwb_params;

typedef struct NvRegwm_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 num;
    NvU32 * offsets;
    NvU32 * values;
} NV_ALIGN(4) NvRegwm_in;

typedef struct NvRegwm_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegwm_inout;

typedef struct NvRegwm_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegwm_out;

typedef struct NvRegwm_params_t
{
    NvRegwm_in in;
    NvRegwm_inout inout;
    NvRegwm_out out;
} NvRegwm_params;

typedef struct NvRegrm_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 num;
    NvU32 * offsets;
    NvU32 * values;
} NV_ALIGN(4) NvRegrm_in;

typedef struct NvRegrm_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegrm_inout;

typedef struct NvRegrm_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegrm_out;

typedef struct NvRegrm_params_t
{
    NvRegrm_in in;
    NvRegrm_inout inout;
    NvRegrm_out out;
} NvRegrm_params;

typedef struct NvRegw_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 offset;
    NvU32 data;
} NV_ALIGN(4) NvRegw_in;

typedef struct NvRegw_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegw_inout;

typedef struct NvRegw_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegw_out;

typedef struct NvRegw_params_t
{
    NvRegw_in in;
    NvRegw_inout inout;
    NvRegw_out out;
} NvRegw_params;

typedef struct NvRegr_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDeviceHandle;
    NvRmModuleID aperture;
    NvU32 instance;
    NvU32 offset;
} NV_ALIGN(4) NvRegr_in;

typedef struct NvRegr_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRegr_inout;

typedef struct NvRegr_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRegr_out;

typedef struct NvRegr_params_t
{
    NvRegr_in in;
    NvRegr_inout inout;
    NvRegr_out out;
} NvRegr_params;

typedef struct NvRmGetRandomBytes_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvU32 NumBytes;
    void* pBytes;
} NV_ALIGN(4) NvRmGetRandomBytes_in;

typedef struct NvRmGetRandomBytes_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmGetRandomBytes_inout;

typedef struct NvRmGetRandomBytes_out_t
{
    NvError ret_;
} NV_ALIGN(4) NvRmGetRandomBytes_out;

typedef struct NvRmGetRandomBytes_params_t
{
    NvRmGetRandomBytes_in in;
    NvRmGetRandomBytes_inout inout;
    NvRmGetRandomBytes_out out;
} NvRmGetRandomBytes_params;

typedef struct NvRmQueryChipUniqueId_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDevHandle;
    NvU32 IdSize;
    void* pId;
} NV_ALIGN(4) NvRmQueryChipUniqueId_in;

typedef struct NvRmQueryChipUniqueId_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmQueryChipUniqueId_inout;

typedef struct NvRmQueryChipUniqueId_out_t
{
    NvError ret_;
} NV_ALIGN(4) NvRmQueryChipUniqueId_out;

typedef struct NvRmQueryChipUniqueId_params_t
{
    NvRmQueryChipUniqueId_in in;
    NvRmQueryChipUniqueId_inout inout;
    NvRmQueryChipUniqueId_out out;
} NvRmQueryChipUniqueId_params;

typedef struct NvRmModuleGetCapabilities_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hDeviceHandle;
    NvRmModuleID Module;
    NvRmModuleCapability * pCaps;
    NvU32 NumCaps;
} NV_ALIGN(4) NvRmModuleGetCapabilities_in;

typedef struct NvRmModuleGetCapabilities_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleGetCapabilities_inout;

typedef struct NvRmModuleGetCapabilities_out_t
{
    NvError ret_;
    void* Capability;
} NV_ALIGN(4) NvRmModuleGetCapabilities_out;

typedef struct NvRmModuleGetCapabilities_params_t
{
    NvRmModuleGetCapabilities_in in;
    NvRmModuleGetCapabilities_inout inout;
    NvRmModuleGetCapabilities_out out;
} NvRmModuleGetCapabilities_params;

typedef struct NvRmModuleResetWithHold_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID Module;
    NvBool bHold;
} NV_ALIGN(4) NvRmModuleResetWithHold_in;

typedef struct NvRmModuleResetWithHold_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleResetWithHold_inout;

typedef struct NvRmModuleResetWithHold_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleResetWithHold_out;

typedef struct NvRmModuleResetWithHold_params_t
{
    NvRmModuleResetWithHold_in in;
    NvRmModuleResetWithHold_inout inout;
    NvRmModuleResetWithHold_out out;
} NvRmModuleResetWithHold_params;

typedef struct NvRmModuleReset_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID Module;
} NV_ALIGN(4) NvRmModuleReset_in;

typedef struct NvRmModuleReset_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleReset_inout;

typedef struct NvRmModuleReset_out_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleReset_out;

typedef struct NvRmModuleReset_params_t
{
    NvRmModuleReset_in in;
    NvRmModuleReset_inout inout;
    NvRmModuleReset_out out;
} NvRmModuleReset_params;

typedef struct NvRmModuleGetNumInstances_in_t
{
    NvU32 package_;
    NvU32 function_;
    NvRmDeviceHandle hRmDeviceHandle;
    NvRmModuleID Module;
} NV_ALIGN(4) NvRmModuleGetNumInstances_in;

typedef struct NvRmModuleGetNumInstances_inout_t
{
    NvU32 dummy_;
} NV_ALIGN(4) NvRmModuleGetNumInstances_inout;

typedef struct NvRmModuleGetNumInstances_out_t
{
    NvU32 ret_;
} NV_ALIGN(4) NvRmModuleGetNumInstances_out;

typedef struct NvRmModuleGetNumInstances_params_t
{
    NvRmModuleGetNumInstances_in in;
    NvRmModuleGetNumInstances_inout inout;
    NvRmModuleGetNumInstances_out out;
} NvRmModuleGetNumInstances_params;
+
+typedef struct NvRmModuleGetBaseAddress_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID Module;
+} NV_ALIGN(4) NvRmModuleGetBaseAddress_in;
+
+typedef struct NvRmModuleGetBaseAddress_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmModuleGetBaseAddress_inout;
+
+typedef struct NvRmModuleGetBaseAddress_out_t
+{
+ NvRmPhysAddr pBaseAddress;
+ NvU32 pSize;
+} NV_ALIGN(4) NvRmModuleGetBaseAddress_out;
+
+typedef struct NvRmModuleGetBaseAddress_params_t
+{
+ NvRmModuleGetBaseAddress_in in;
+ NvRmModuleGetBaseAddress_inout inout;
+ NvRmModuleGetBaseAddress_out out;
+} NvRmModuleGetBaseAddress_params;
+
+typedef struct NvRmModuleGetModuleInfo_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmModuleID module;
+ NvRmModuleInfo * pModuleInfo;
+} NV_ALIGN(4) NvRmModuleGetModuleInfo_in;
+
+typedef struct NvRmModuleGetModuleInfo_inout_t
+{
+ NvU32 pNum;
+} NV_ALIGN(4) NvRmModuleGetModuleInfo_inout;
+
+typedef struct NvRmModuleGetModuleInfo_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmModuleGetModuleInfo_out;
+
+typedef struct NvRmModuleGetModuleInfo_params_t
+{
+ NvRmModuleGetModuleInfo_in in;
+ NvRmModuleGetModuleInfo_inout inout;
+ NvRmModuleGetModuleInfo_out out;
+} NvRmModuleGetModuleInfo_params;
+
+/*
+ * Dispatch stub for NvRegw08: unpacks the marshalled arguments from
+ * InBuffer and calls through to the RM implementation. No output is
+ * marshalled; always returns NvSuccess.
+ */
+static NvError NvRegw08_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegw08_in *p_in;
+
+ p_in = (NvRegw08_in *)InBuffer;
+
+
+ NvRegw08( p_in->rm, p_in->aperture, p_in->instance, p_in->offset, p_in->data );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegr08: calls the RM implementation and stores
+ * its return value in the marshalled _out section for copy-back.
+ */
+static NvError NvRegr08_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegr08_in *p_in;
+ NvRegr08_out *p_out;
+
+ p_in = (NvRegr08_in *)InBuffer;
+ /* OutBuffer corresponds to the params' inout member; step forward by
+ * (out - inout) to reach the out section. */
+ p_out = (NvRegr08_out *)((NvU8 *)OutBuffer + OFFSET(NvRegr08_params, out) - OFFSET(NvRegr08_params, inout));
+
+
+ p_out->ret_ = NvRegr08( p_in->hDeviceHandle, p_in->aperture, p_in->instance, p_in->offset );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegrb: allocates a kernel-side NvU32 array,
+ * lets the RM implementation fill it, then copies it back to the
+ * caller's buffer with NvOsCopyOut.
+ * Returns NvSuccess, NvError_InsufficientMemory on allocation failure,
+ * or NvError_BadParameter on a failed copy-out or an oversized count.
+ */
+static NvError NvRegrb_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegrb_in *p_in;
+ NvU32 *values = NULL;
+
+ p_in = (NvRegrb_in *)InBuffer;
+
+ if( p_in->num && p_in->values )
+ {
+ /* num is caller-controlled: refuse counts whose byte size would wrap
+ * NvU32 and produce a short allocation below. */
+ if( p_in->num > ((NvU32)-1) / sizeof( NvU32 ) )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ values = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !values )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ NvRegrb( p_in->hRmDeviceHandle, p_in->aperture, p_in->instance, p_in->num, p_in->offset, values );
+
+ if(p_in->values && values)
+ {
+ err_ = NvOsCopyOut( p_in->values, values, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( values );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegwb: copies the caller's value array into a
+ * kernel-side buffer with NvOsCopyIn, then hands it to the RM
+ * implementation.
+ * Returns NvSuccess, NvError_InsufficientMemory on allocation failure,
+ * or NvError_BadParameter on a failed copy-in or an oversized count.
+ */
+static NvError NvRegwb_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegwb_in *p_in;
+ NvU32 *values = NULL;
+
+ p_in = (NvRegwb_in *)InBuffer;
+
+ if( p_in->num && p_in->values )
+ {
+ /* num is caller-controlled: refuse counts whose byte size would wrap
+ * NvU32 and produce a short allocation below. */
+ if( p_in->num > ((NvU32)-1) / sizeof( NvU32 ) )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ values = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !values )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ /* p_in->values is known non-NULL here (checked above), so copy in
+ * unconditionally -- the generated inner NULL test was dead code. */
+ err_ = NvOsCopyIn( values, p_in->values, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+
+ NvRegwb( p_in->hRmDeviceHandle, p_in->aperture, p_in->instance, p_in->num, p_in->offset, values );
+
+clean:
+ NvOsFree( values );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegwm: copies the caller's offset and value
+ * arrays into kernel-side buffers, then calls the RM implementation.
+ * NOTE(review): num comes from the caller-supplied InBuffer and
+ * p_in->num * sizeof( NvU32 ) is not checked for NvU32 wrap-around --
+ * a huge num could yield a short allocation; consider a bound check.
+ * NOTE(review): the inner `if( p_in->offsets )` / `if( p_in->values )`
+ * tests are dead -- both pointers are already known non-NULL from the
+ * enclosing conditions.
+ */
+static NvError NvRegwm_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegwm_in *p_in;
+ NvU32 *offsets = NULL;
+ NvU32 *values = NULL;
+
+ p_in = (NvRegwm_in *)InBuffer;
+
+ if( p_in->num && p_in->offsets )
+ {
+ offsets = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !offsets )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->offsets )
+ {
+ err_ = NvOsCopyIn( offsets, p_in->offsets, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+ if( p_in->num && p_in->values )
+ {
+ values = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !values )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->values )
+ {
+ err_ = NvOsCopyIn( values, p_in->values, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ NvRegwm( p_in->hRmDeviceHandle, p_in->aperture, p_in->instance, p_in->num, offsets, values );
+
+clean:
+ NvOsFree( offsets );
+ NvOsFree( values );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegrm: copies the caller's offset array in,
+ * allocates a result array for the RM implementation to fill, and
+ * copies the values back out to the caller's buffer.
+ * NOTE(review): num is caller-controlled and the
+ * p_in->num * sizeof( NvU32 ) multiplications are not checked for
+ * NvU32 wrap-around; consider a bound check before allocating.
+ */
+static NvError NvRegrm_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegrm_in *p_in;
+ NvU32 *offsets = NULL;
+ NvU32 *values = NULL;
+
+ p_in = (NvRegrm_in *)InBuffer;
+
+ if( p_in->num && p_in->offsets )
+ {
+ offsets = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !offsets )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->offsets )
+ {
+ err_ = NvOsCopyIn( offsets, p_in->offsets, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+ if( p_in->num && p_in->values )
+ {
+ values = (NvU32 *)NvOsAlloc( p_in->num * sizeof( NvU32 ) );
+ if( !values )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ NvRegrm( p_in->hRmDeviceHandle, p_in->aperture, p_in->instance, p_in->num, offsets, values );
+
+ if(p_in->values && values)
+ {
+ err_ = NvOsCopyOut( p_in->values, values, p_in->num * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( offsets );
+ NvOsFree( values );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegw: unpacks the marshalled arguments from
+ * InBuffer and calls the RM implementation. No output is marshalled;
+ * always returns NvSuccess.
+ */
+static NvError NvRegw_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegw_in *p_in;
+
+ p_in = (NvRegw_in *)InBuffer;
+
+
+ NvRegw( p_in->hDeviceHandle, p_in->aperture, p_in->instance, p_in->offset, p_in->data );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRegr: calls the RM implementation and stores its
+ * return value in the marshalled _out section for copy-back.
+ */
+static NvError NvRegr_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRegr_in *p_in;
+ NvRegr_out *p_out;
+
+ p_in = (NvRegr_in *)InBuffer;
+ /* OutBuffer corresponds to the params' inout member; step forward by
+ * (out - inout) to reach the out section. */
+ p_out = (NvRegr_out *)((NvU8 *)OutBuffer + OFFSET(NvRegr_params, out) - OFFSET(NvRegr_params, inout));
+
+
+ p_out->ret_ = NvRegr( p_in->hDeviceHandle, p_in->aperture, p_in->instance, p_in->offset );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmGetRandomBytes: allocates a kernel-side
+ * scratch buffer, lets the RM fill it, and copies the bytes back to
+ * the caller's pBytes pointer.
+ * NOTE(review): if NumBytes != 0 but pBytes is NULL, the RM call below
+ * receives a NULL buffer with a non-zero count -- presumably handled
+ * by the callee; verify.
+ */
+static NvError NvRmGetRandomBytes_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmGetRandomBytes_in *p_in;
+ NvRmGetRandomBytes_out *p_out;
+ void* pBytes = NULL;
+
+ p_in = (NvRmGetRandomBytes_in *)InBuffer;
+ /* OutBuffer addresses the inout member; step to the out section. */
+ p_out = (NvRmGetRandomBytes_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetRandomBytes_params, out) - OFFSET(NvRmGetRandomBytes_params, inout));
+
+ if( p_in->NumBytes && p_in->pBytes )
+ {
+ pBytes = (void* )NvOsAlloc( p_in->NumBytes );
+ if( !pBytes )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmGetRandomBytes( p_in->hRmDeviceHandle, p_in->NumBytes, pBytes );
+
+ if(p_in->pBytes && pBytes)
+ {
+ err_ = NvOsCopyOut( p_in->pBytes, pBytes, p_in->NumBytes );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pBytes );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmQueryChipUniqueId: allocates a kernel-side
+ * buffer of IdSize bytes, lets the RM fill it, and copies the result
+ * back to the caller's pId pointer.
+ */
+static NvError NvRmQueryChipUniqueId_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmQueryChipUniqueId_in *p_in;
+ NvRmQueryChipUniqueId_out *p_out;
+ void* pId = NULL;
+
+ p_in = (NvRmQueryChipUniqueId_in *)InBuffer;
+ /* OutBuffer addresses the inout member; step to the out section. */
+ p_out = (NvRmQueryChipUniqueId_out *)((NvU8 *)OutBuffer + OFFSET(NvRmQueryChipUniqueId_params, out) - OFFSET(NvRmQueryChipUniqueId_params, inout));
+
+ if( p_in->IdSize && p_in->pId )
+ {
+ pId = (void* )NvOsAlloc( p_in->IdSize );
+ if( !pId )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmQueryChipUniqueId( p_in->hDevHandle, p_in->IdSize, pId );
+
+ if(p_in->pId && pId)
+ {
+ err_ = NvOsCopyOut( p_in->pId, pId, p_in->IdSize );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pId );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleGetCapabilities: copies the caller's
+ * capability table in, calls the RM, and returns the matched
+ * capability pointer through the _out section. Note the capability
+ * array itself is input-only here; only the selected pointer goes back.
+ * NOTE(review): NumCaps is caller-controlled and
+ * NumCaps * sizeof( NvRmModuleCapability ) is not checked for NvU32
+ * wrap-around; consider a bound check before allocating.
+ */
+static NvError NvRmModuleGetCapabilities_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleGetCapabilities_in *p_in;
+ NvRmModuleGetCapabilities_out *p_out;
+ NvRmModuleCapability *pCaps = NULL;
+
+ p_in = (NvRmModuleGetCapabilities_in *)InBuffer;
+ p_out = (NvRmModuleGetCapabilities_out *)((NvU8 *)OutBuffer + OFFSET(NvRmModuleGetCapabilities_params, out) - OFFSET(NvRmModuleGetCapabilities_params, inout));
+
+ if( p_in->NumCaps && p_in->pCaps )
+ {
+ pCaps = (NvRmModuleCapability *)NvOsAlloc( p_in->NumCaps * sizeof( NvRmModuleCapability ) );
+ if( !pCaps )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->pCaps )
+ {
+ err_ = NvOsCopyIn( pCaps, p_in->pCaps, p_in->NumCaps * sizeof( NvRmModuleCapability ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ p_out->ret_ = NvRmModuleGetCapabilities( p_in->hDeviceHandle, p_in->Module, pCaps, p_in->NumCaps, &p_out->Capability );
+
+clean:
+ NvOsFree( pCaps );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleResetWithHold: no output marshalling;
+ * always returns NvSuccess.
+ */
+static NvError NvRmModuleResetWithHold_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleResetWithHold_in *p_in;
+
+ p_in = (NvRmModuleResetWithHold_in *)InBuffer;
+
+
+ NvRmModuleResetWithHold( p_in->hRmDeviceHandle, p_in->Module, p_in->bHold );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleReset: no output marshalling; always
+ * returns NvSuccess.
+ */
+static NvError NvRmModuleReset_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleReset_in *p_in;
+
+ p_in = (NvRmModuleReset_in *)InBuffer;
+
+
+ NvRmModuleReset( p_in->hRmDeviceHandle, p_in->Module );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleGetNumInstances: stores the instance
+ * count in the marshalled _out section for copy-back.
+ */
+static NvError NvRmModuleGetNumInstances_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleGetNumInstances_in *p_in;
+ NvRmModuleGetNumInstances_out *p_out;
+
+ p_in = (NvRmModuleGetNumInstances_in *)InBuffer;
+ /* OutBuffer addresses the inout member; step to the out section. */
+ p_out = (NvRmModuleGetNumInstances_out *)((NvU8 *)OutBuffer + OFFSET(NvRmModuleGetNumInstances_params, out) - OFFSET(NvRmModuleGetNumInstances_params, inout));
+
+
+ p_out->ret_ = NvRmModuleGetNumInstances( p_in->hRmDeviceHandle, p_in->Module );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleGetBaseAddress: the RM writes the base
+ * address and size directly into the marshalled _out section (the
+ * function itself returns void).
+ */
+static NvError NvRmModuleGetBaseAddress_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleGetBaseAddress_in *p_in;
+ NvRmModuleGetBaseAddress_out *p_out;
+
+ p_in = (NvRmModuleGetBaseAddress_in *)InBuffer;
+ p_out = (NvRmModuleGetBaseAddress_out *)((NvU8 *)OutBuffer + OFFSET(NvRmModuleGetBaseAddress_params, out) - OFFSET(NvRmModuleGetBaseAddress_params, inout));
+
+
+ NvRmModuleGetBaseAddress( p_in->hRmDeviceHandle, p_in->Module, &p_out->pBaseAddress, &p_out->pSize );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmModuleGetModuleInfo. pNum is a true inout:
+ * it arrives as the caller's array capacity (read from the inout
+ * section inside InBuffer) and is written back as the actual count
+ * (to the start of OutBuffer, where the inout section lives on the
+ * way out). The module-info array is then copied out using the
+ * updated count.
+ * NOTE(review): pNum * sizeof( NvRmModuleInfo ) is not checked for
+ * NvU32 wrap-around, and the copy-out uses the RM-updated count --
+ * assumes the RM never reports more entries than the capacity it was
+ * given; verify.
+ */
+static NvError NvRmModuleGetModuleInfo_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmModuleGetModuleInfo_in *p_in;
+ NvRmModuleGetModuleInfo_inout *p_inout;
+ NvRmModuleGetModuleInfo_out *p_out;
+ NvRmModuleGetModuleInfo_inout inout;
+ NvRmModuleInfo *pModuleInfo = NULL;
+
+ p_in = (NvRmModuleGetModuleInfo_in *)InBuffer;
+ p_inout = (NvRmModuleGetModuleInfo_inout *)((NvU8 *)InBuffer + OFFSET(NvRmModuleGetModuleInfo_params, inout));
+ p_out = (NvRmModuleGetModuleInfo_out *)((NvU8 *)OutBuffer + OFFSET(NvRmModuleGetModuleInfo_params, out) - OFFSET(NvRmModuleGetModuleInfo_params, inout));
+
+ /* Generated code: local copy of the inout section; (void) silences
+ * unused warnings in variants that never read it. */
+ (void)inout;
+ inout.pNum = p_inout->pNum;
+ if( p_inout->pNum && p_in->pModuleInfo )
+ {
+ pModuleInfo = (NvRmModuleInfo *)NvOsAlloc( p_inout->pNum * sizeof( NvRmModuleInfo ) );
+ if( !pModuleInfo )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmModuleGetModuleInfo( p_in->hDevice, p_in->module, &inout.pNum, pModuleInfo );
+
+
+ /* The inout section sits at the start of OutBuffer on the way out. */
+ p_inout = (NvRmModuleGetModuleInfo_inout *)OutBuffer;
+ p_inout->pNum = inout.pNum;
+ if(p_in->pModuleInfo && pModuleInfo)
+ {
+ err_ = NvOsCopyOut( p_in->pModuleInfo, pModuleInfo, p_inout->pNum * sizeof( NvRmModuleInfo ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pModuleInfo );
+ return err_;
+}
+
+/*
+ * Function-number demultiplexer for the nvrm_module package: routes a
+ * marshalled call to the matching dispatch stub. The case values must
+ * stay in sync with the user-side stub numbering. Unknown function
+ * numbers yield NvError_BadParameter.
+ */
+NvError nvrm_module_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_module_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 15:
+ err_ = NvRegw08_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 14:
+ err_ = NvRegr08_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 13:
+ err_ = NvRegrb_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 12:
+ err_ = NvRegwb_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 11:
+ err_ = NvRegwm_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 10:
+ err_ = NvRegrm_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 9:
+ err_ = NvRegw_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 8:
+ err_ = NvRegr_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 7:
+ err_ = NvRmGetRandomBytes_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 6:
+ err_ = NvRmQueryChipUniqueId_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 5:
+ err_ = NvRmModuleGetCapabilities_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 4:
+ err_ = NvRmModuleResetWithHold_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = NvRmModuleReset_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmModuleGetNumInstances_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmModuleGetBaseAddress_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmModuleGetModuleInfo_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_owr_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_owr_dispatch.c
new file mode 100644
index 000000000000..57f9b7b703e8
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_owr_dispatch.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_owr.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * Marshalling structures for the nvrm_owr (one-wire) dispatcher: an
+ * _in / _inout / _out triple per entry point, packed into a _params
+ * struct whose layout the dispatch stubs rely on to find the _out
+ * section within OutBuffer. Keep in sync with the user-side stubs.
+ */
+typedef struct NvRmOwrTransaction_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmOwrHandle hOwr;
+ NvU32 OwrPinMap;
+ NvU8 * Data;
+ NvU32 DataLen;
+ NvRmOwrTransactionInfo * Transaction;
+ NvU32 NumOfTransactions;
+} NV_ALIGN(4) NvRmOwrTransaction_in;
+
+typedef struct NvRmOwrTransaction_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOwrTransaction_inout;
+
+typedef struct NvRmOwrTransaction_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmOwrTransaction_out;
+
+typedef struct NvRmOwrTransaction_params_t
+{
+ NvRmOwrTransaction_in in;
+ NvRmOwrTransaction_inout inout;
+ NvRmOwrTransaction_out out;
+} NvRmOwrTransaction_params;
+
+typedef struct NvRmOwrClose_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmOwrHandle hOwr;
+} NV_ALIGN(4) NvRmOwrClose_in;
+
+typedef struct NvRmOwrClose_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOwrClose_inout;
+
+typedef struct NvRmOwrClose_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOwrClose_out;
+
+typedef struct NvRmOwrClose_params_t
+{
+ NvRmOwrClose_in in;
+ NvRmOwrClose_inout inout;
+ NvRmOwrClose_out out;
+} NvRmOwrClose_params;
+
+typedef struct NvRmOwrOpen_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 instance;
+} NV_ALIGN(4) NvRmOwrOpen_in;
+
+typedef struct NvRmOwrOpen_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmOwrOpen_inout;
+
+typedef struct NvRmOwrOpen_out_t
+{
+ NvError ret_;
+ NvRmOwrHandle hOwr;
+} NV_ALIGN(4) NvRmOwrOpen_out;
+
+typedef struct NvRmOwrOpen_params_t
+{
+ NvRmOwrOpen_in in;
+ NvRmOwrOpen_inout inout;
+ NvRmOwrOpen_out out;
+} NvRmOwrOpen_params;
+
+/*
+ * Dispatch stub for NvRmOwrTransaction: copies the caller's data
+ * buffer and transaction descriptors in, runs the one-wire
+ * transaction, then copies the (possibly updated) data buffer back
+ * out to the caller.
+ * NOTE(review): DataLen and NumOfTransactions are caller-controlled;
+ * NumOfTransactions * sizeof( NvRmOwrTransactionInfo ) is not checked
+ * for NvU32 wrap-around. The inner `if( p_in->Data )` /
+ * `if( p_in->Transaction )` tests are dead -- already guaranteed
+ * non-NULL by the enclosing conditions.
+ */
+static NvError NvRmOwrTransaction_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmOwrTransaction_in *p_in;
+ NvRmOwrTransaction_out *p_out;
+ NvU8 *Data = NULL;
+ NvRmOwrTransactionInfo *Transaction = NULL;
+
+ p_in = (NvRmOwrTransaction_in *)InBuffer;
+ p_out = (NvRmOwrTransaction_out *)((NvU8 *)OutBuffer + OFFSET(NvRmOwrTransaction_params, out) - OFFSET(NvRmOwrTransaction_params, inout));
+
+ if( p_in->DataLen && p_in->Data )
+ {
+ Data = (NvU8 *)NvOsAlloc( p_in->DataLen * sizeof( NvU8 ) );
+ if( !Data )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->Data )
+ {
+ err_ = NvOsCopyIn( Data, p_in->Data, p_in->DataLen * sizeof( NvU8 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+ if( p_in->NumOfTransactions && p_in->Transaction )
+ {
+ Transaction = (NvRmOwrTransactionInfo *)NvOsAlloc( p_in->NumOfTransactions * sizeof( NvRmOwrTransactionInfo ) );
+ if( !Transaction )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->Transaction )
+ {
+ err_ = NvOsCopyIn( Transaction, p_in->Transaction, p_in->NumOfTransactions * sizeof( NvRmOwrTransactionInfo ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ p_out->ret_ = NvRmOwrTransaction( p_in->hOwr, p_in->OwrPinMap, Data, p_in->DataLen, Transaction, p_in->NumOfTransactions );
+
+ if(p_in->Data && Data)
+ {
+ err_ = NvOsCopyOut( p_in->Data, Data, p_in->DataLen * sizeof( NvU8 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( Data );
+ NvOsFree( Transaction );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmOwrClose: releases the one-wire handle.
+ * No output marshalling; always returns NvSuccess.
+ */
+static NvError NvRmOwrClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmOwrClose_in *p_in;
+
+ p_in = (NvRmOwrClose_in *)InBuffer;
+
+
+ NvRmOwrClose( p_in->hOwr );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmOwrOpen: opens a one-wire controller instance
+ * and returns the new handle through the marshalled _out section.
+ */
+static NvError NvRmOwrOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmOwrOpen_in *p_in;
+ NvRmOwrOpen_out *p_out;
+
+ p_in = (NvRmOwrOpen_in *)InBuffer;
+ /* OutBuffer addresses the inout member; step to the out section. */
+ p_out = (NvRmOwrOpen_out *)((NvU8 *)OutBuffer + OFFSET(NvRmOwrOpen_params, out) - OFFSET(NvRmOwrOpen_params, inout));
+
+
+ p_out->ret_ = NvRmOwrOpen( p_in->hDevice, p_in->instance, &p_out->hOwr );
+
+ return err_;
+}
+
+/*
+ * Function-number demultiplexer for the nvrm_owr package. Case values
+ * must stay in sync with the user-side stub numbering; unknown
+ * function numbers yield NvError_BadParameter.
+ */
+NvError nvrm_owr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_owr_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 2:
+ err_ = NvRmOwrTransaction_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmOwrClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmOwrOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pcie_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pcie_dispatch.c
new file mode 100644
index 000000000000..1a6b9344b493
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pcie_dispatch.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_pcie.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * Marshalling structures for the nvrm_pcie dispatcher: an _in /
+ * _inout / _out triple per entry point, packed into a _params struct
+ * whose layout the dispatch stubs rely on to find the _out section
+ * within OutBuffer. Keep in sync with the user-side stubs.
+ */
+typedef struct NvRmUnmapPciMemory_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDeviceHandle;
+ NvRmPhysAddr mem;
+ NvU32 size;
+} NV_ALIGN(4) NvRmUnmapPciMemory_in;
+
+typedef struct NvRmUnmapPciMemory_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmUnmapPciMemory_inout;
+
+typedef struct NvRmUnmapPciMemory_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmUnmapPciMemory_out;
+
+typedef struct NvRmUnmapPciMemory_params_t
+{
+ NvRmUnmapPciMemory_in in;
+ NvRmUnmapPciMemory_inout inout;
+ NvRmUnmapPciMemory_out out;
+} NvRmUnmapPciMemory_params;
+
+typedef struct NvRmMapPciMemory_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDeviceHandle;
+ NvRmPciPhysAddr mem;
+ NvU32 size;
+} NV_ALIGN(4) NvRmMapPciMemory_in;
+
+typedef struct NvRmMapPciMemory_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmMapPciMemory_inout;
+
+typedef struct NvRmMapPciMemory_out_t
+{
+ NvRmPhysAddr ret_;
+} NV_ALIGN(4) NvRmMapPciMemory_out;
+
+typedef struct NvRmMapPciMemory_params_t
+{
+ NvRmMapPciMemory_in in;
+ NvRmMapPciMemory_inout inout;
+ NvRmMapPciMemory_out out;
+} NvRmMapPciMemory_params;
+
+typedef struct NvRmRegisterPcieLegacyHandler_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDeviceHandle;
+ NvU32 function_device_bus;
+ NvOsSemaphoreHandle sem;
+ NvBool InterruptEnable;
+} NV_ALIGN(4) NvRmRegisterPcieLegacyHandler_in;
+
+typedef struct NvRmRegisterPcieLegacyHandler_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmRegisterPcieLegacyHandler_inout;
+
+typedef struct NvRmRegisterPcieLegacyHandler_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmRegisterPcieLegacyHandler_out;
+
+typedef struct NvRmRegisterPcieLegacyHandler_params_t
+{
+ NvRmRegisterPcieLegacyHandler_in in;
+ NvRmRegisterPcieLegacyHandler_inout inout;
+ NvRmRegisterPcieLegacyHandler_out out;
+} NvRmRegisterPcieLegacyHandler_params;
+
+typedef struct NvRmRegisterPcieMSIHandler_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDeviceHandle;
+ NvU32 function_device_bus;
+ NvU32 index;
+ NvOsSemaphoreHandle sem;
+ NvBool InterruptEnable;
+} NV_ALIGN(4) NvRmRegisterPcieMSIHandler_in;
+
+typedef struct NvRmRegisterPcieMSIHandler_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmRegisterPcieMSIHandler_inout;
+
+typedef struct NvRmRegisterPcieMSIHandler_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmRegisterPcieMSIHandler_out;
+
+typedef struct NvRmRegisterPcieMSIHandler_params_t
+{
+ NvRmRegisterPcieMSIHandler_in in;
+ NvRmRegisterPcieMSIHandler_inout inout;
+ NvRmRegisterPcieMSIHandler_out out;
+} NvRmRegisterPcieMSIHandler_params;
+
+typedef struct NvRmReadWriteConfigSpace_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDeviceHandle;
+ NvU32 bus_number;
+ NvRmPcieAccessType type;
+ NvU32 offset;
+ NvU8 * Data;
+ NvU32 DataLen;
+} NV_ALIGN(4) NvRmReadWriteConfigSpace_in;
+
+typedef struct NvRmReadWriteConfigSpace_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmReadWriteConfigSpace_inout;
+
+typedef struct NvRmReadWriteConfigSpace_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmReadWriteConfigSpace_out;
+
+typedef struct NvRmReadWriteConfigSpace_params_t
+{
+ NvRmReadWriteConfigSpace_in in;
+ NvRmReadWriteConfigSpace_inout inout;
+ NvRmReadWriteConfigSpace_out out;
+} NvRmReadWriteConfigSpace_params;
+
+/*
+ * Dispatch stub for NvRmUnmapPciMemory: no output marshalling;
+ * always returns NvSuccess.
+ */
+static NvError NvRmUnmapPciMemory_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmUnmapPciMemory_in *p_in;
+
+ p_in = (NvRmUnmapPciMemory_in *)InBuffer;
+
+
+ NvRmUnmapPciMemory( p_in->hDeviceHandle, p_in->mem, p_in->size );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmMapPciMemory: stores the mapped physical
+ * address in the marshalled _out section for copy-back.
+ */
+static NvError NvRmMapPciMemory_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmMapPciMemory_in *p_in;
+ NvRmMapPciMemory_out *p_out;
+
+ p_in = (NvRmMapPciMemory_in *)InBuffer;
+ /* OutBuffer addresses the inout member; step to the out section. */
+ p_out = (NvRmMapPciMemory_out *)((NvU8 *)OutBuffer + OFFSET(NvRmMapPciMemory_params, out) - OFFSET(NvRmMapPciMemory_params, inout));
+
+
+ p_out->ret_ = NvRmMapPciMemory( p_in->hDeviceHandle, p_in->mem, p_in->size );
+
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmRegisterPcieLegacyHandler: unmarshals the
+ * caller's semaphore handle into a kernel-side handle before
+ * registering. The local reference obtained by NvOsSemaphoreUnmarshal
+ * is released via NvOsSemaphoreDestroy on all paths -- assumes the
+ * registration call takes its own reference if it keeps the
+ * semaphore; verify against the RM implementation.
+ */
+static NvError NvRmRegisterPcieLegacyHandler_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmRegisterPcieLegacyHandler_in *p_in;
+ NvRmRegisterPcieLegacyHandler_out *p_out;
+ NvOsSemaphoreHandle sem = NULL;
+
+ p_in = (NvRmRegisterPcieLegacyHandler_in *)InBuffer;
+ p_out = (NvRmRegisterPcieLegacyHandler_out *)((NvU8 *)OutBuffer + OFFSET(NvRmRegisterPcieLegacyHandler_params, out) - OFFSET(NvRmRegisterPcieLegacyHandler_params, inout));
+
+ if( p_in->sem )
+ {
+ err_ = NvOsSemaphoreUnmarshal( p_in->sem, &sem );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmRegisterPcieLegacyHandler( p_in->hDeviceHandle, p_in->function_device_bus, sem, p_in->InterruptEnable );
+
+clean:
+ NvOsSemaphoreDestroy( sem );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmRegisterPcieMSIHandler: unmarshals the
+ * caller's semaphore handle into a kernel-side handle before
+ * registering. The local reference from NvOsSemaphoreUnmarshal is
+ * released via NvOsSemaphoreDestroy on all paths -- assumes the
+ * registration call takes its own reference if it keeps the
+ * semaphore; verify against the RM implementation.
+ */
+static NvError NvRmRegisterPcieMSIHandler_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmRegisterPcieMSIHandler_in *p_in;
+ NvRmRegisterPcieMSIHandler_out *p_out;
+ NvOsSemaphoreHandle sem = NULL;
+
+ p_in = (NvRmRegisterPcieMSIHandler_in *)InBuffer;
+ p_out = (NvRmRegisterPcieMSIHandler_out *)((NvU8 *)OutBuffer + OFFSET(NvRmRegisterPcieMSIHandler_params, out) - OFFSET(NvRmRegisterPcieMSIHandler_params, inout));
+
+ if( p_in->sem )
+ {
+ err_ = NvOsSemaphoreUnmarshal( p_in->sem, &sem );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmRegisterPcieMSIHandler( p_in->hDeviceHandle, p_in->function_device_bus, p_in->index, sem, p_in->InterruptEnable );
+
+clean:
+ NvOsSemaphoreDestroy( sem );
+ return err_;
+}
+
+/*
+ * Dispatch stub for NvRmReadWriteConfigSpace: copies the caller's
+ * data buffer in, performs the config-space access, and copies the
+ * buffer back out (the same buffer serves both read and write
+ * directions, selected by p_in->type).
+ * NOTE(review): the inner `if( p_in->Data )` test is dead -- Data is
+ * already known non-NULL from the enclosing condition.
+ */
+static NvError NvRmReadWriteConfigSpace_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmReadWriteConfigSpace_in *p_in;
+ NvRmReadWriteConfigSpace_out *p_out;
+ NvU8 *Data = NULL;
+
+ p_in = (NvRmReadWriteConfigSpace_in *)InBuffer;
+ p_out = (NvRmReadWriteConfigSpace_out *)((NvU8 *)OutBuffer + OFFSET(NvRmReadWriteConfigSpace_params, out) - OFFSET(NvRmReadWriteConfigSpace_params, inout));
+
+ if( p_in->DataLen && p_in->Data )
+ {
+ Data = (NvU8 *)NvOsAlloc( p_in->DataLen * sizeof( NvU8 ) );
+ if( !Data )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ if( p_in->Data )
+ {
+ err_ = NvOsCopyIn( Data, p_in->Data, p_in->DataLen * sizeof( NvU8 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ }
+
+ p_out->ret_ = NvRmReadWriteConfigSpace( p_in->hDeviceHandle, p_in->bus_number, p_in->type, p_in->offset, Data, p_in->DataLen );
+
+ if(p_in->Data && Data)
+ {
+ err_ = NvOsCopyOut( p_in->Data, Data, p_in->DataLen * sizeof( NvU8 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( Data );
+ return err_;
+}
+
+/*
+ * Function-number demultiplexer for the nvrm_pcie package. Case
+ * values must stay in sync with the user-side stub numbering;
+ * unknown function numbers yield NvError_BadParameter.
+ */
+NvError nvrm_pcie_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pcie_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 4:
+ err_ = NvRmUnmapPciMemory_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = NvRmMapPciMemory_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmRegisterPcieLegacyHandler_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmRegisterPcieMSIHandler_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmReadWriteConfigSpace_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pinmux_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pinmux_dispatch.c
new file mode 100644
index 000000000000..4064ca6e0029
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pinmux_dispatch.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_pinmux.h"
+
+/*
+ * Byte offset of member 'e' within struct type 's' (hand-rolled offsetof).
+ * The dispatch stubs use it to locate the 'out' section of a params record:
+ * the caller's OutBuffer points at the 'inout' section, so the out pointer
+ * is OutBuffer + (OFFSET(out) - OFFSET(inout)).
+ */
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * IDL-generated marshalling records.  Each API function gets an in/inout/out
+ * triple plus a _params aggregate fixing their relative layout; package_ and
+ * function_ are the routing header written by the client-side stub.
+ */
+
+/* Marshalled parameters for NvRmGetStraps. */
+typedef struct NvRmGetStraps_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmStrapGroup StrapGroup;
+} NV_ALIGN(4) NvRmGetStraps_in;
+
+typedef struct NvRmGetStraps_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGetStraps_inout;
+
+typedef struct NvRmGetStraps_out_t
+{
+ NvError ret_;
+ NvU32 pStrapValue;
+} NV_ALIGN(4) NvRmGetStraps_out;
+
+typedef struct NvRmGetStraps_params_t
+{
+ NvRmGetStraps_in in;
+ NvRmGetStraps_inout inout;
+ NvRmGetStraps_out out;
+} NvRmGetStraps_params;
+
+/* Marshalled parameters for NvRmGetModuleInterfaceCapabilities; pCaps is a
+ * client pointer to a caller-allocated capability struct of CapStructSize
+ * bytes that the stub copies back out. */
+typedef struct NvRmGetModuleInterfaceCapabilities_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRm;
+ NvRmModuleID ModuleId;
+ NvU32 CapStructSize;
+ void* pCaps;
+} NV_ALIGN(4) NvRmGetModuleInterfaceCapabilities_in;
+
+typedef struct NvRmGetModuleInterfaceCapabilities_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmGetModuleInterfaceCapabilities_inout;
+
+typedef struct NvRmGetModuleInterfaceCapabilities_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmGetModuleInterfaceCapabilities_out;
+
+typedef struct NvRmGetModuleInterfaceCapabilities_params_t
+{
+ NvRmGetModuleInterfaceCapabilities_in in;
+ NvRmGetModuleInterfaceCapabilities_inout inout;
+ NvRmGetModuleInterfaceCapabilities_out out;
+} NvRmGetModuleInterfaceCapabilities_params;
+
+/* Marshalled parameters for NvRmExternalClockConfig. */
+typedef struct NvRmExternalClockConfig_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 IoModule;
+ NvU32 Instance;
+ NvU32 Config;
+ NvBool EnableTristate;
+} NV_ALIGN(4) NvRmExternalClockConfig_in;
+
+typedef struct NvRmExternalClockConfig_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmExternalClockConfig_inout;
+
+typedef struct NvRmExternalClockConfig_out_t
+{
+ NvU32 ret_;
+} NV_ALIGN(4) NvRmExternalClockConfig_out;
+
+typedef struct NvRmExternalClockConfig_params_t
+{
+ NvRmExternalClockConfig_in in;
+ NvRmExternalClockConfig_inout inout;
+ NvRmExternalClockConfig_out out;
+} NvRmExternalClockConfig_params;
+
+/* Marshalled parameters for NvRmSetOdmModuleTristate. */
+typedef struct NvRmSetOdmModuleTristate_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 OdmModule;
+ NvU32 OdmInstance;
+ NvBool EnableTristate;
+} NV_ALIGN(4) NvRmSetOdmModuleTristate_in;
+
+typedef struct NvRmSetOdmModuleTristate_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmSetOdmModuleTristate_inout;
+
+typedef struct NvRmSetOdmModuleTristate_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmSetOdmModuleTristate_out;
+
+typedef struct NvRmSetOdmModuleTristate_params_t
+{
+ NvRmSetOdmModuleTristate_in in;
+ NvRmSetOdmModuleTristate_inout inout;
+ NvRmSetOdmModuleTristate_out out;
+} NvRmSetOdmModuleTristate_params;
+
+/* Marshalled parameters for NvRmSetModuleTristate. */
+typedef struct NvRmSetModuleTristate_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvRmModuleID RmModule;
+ NvBool EnableTristate;
+} NV_ALIGN(4) NvRmSetModuleTristate_in;
+
+typedef struct NvRmSetModuleTristate_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmSetModuleTristate_inout;
+
+typedef struct NvRmSetModuleTristate_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmSetModuleTristate_out;
+
+typedef struct NvRmSetModuleTristate_params_t
+{
+ NvRmSetModuleTristate_in in;
+ NvRmSetModuleTristate_inout inout;
+ NvRmSetModuleTristate_out out;
+} NvRmSetModuleTristate_params;
+
+/*
+ * Unmarshals and invokes NvRmGetStraps.  Reads the device handle and strap
+ * group from InBuffer; the strap value and NvError result are written into
+ * the 'out' section located relative to OutBuffer (which points at 'inout').
+ * Always returns NvSuccess as the transport status; the API result travels
+ * in p_out->ret_.
+ */
+static NvError NvRmGetStraps_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmGetStraps_in *p_in;
+ NvRmGetStraps_out *p_out;
+
+ p_in = (NvRmGetStraps_in *)InBuffer;
+ p_out = (NvRmGetStraps_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetStraps_params, out) - OFFSET(NvRmGetStraps_params, inout));
+
+
+ p_out->ret_ = NvRmGetStraps( p_in->hDevice, p_in->StrapGroup, &p_out->pStrapValue );
+
+ return err_;
+}
+
+/*
+ * Unmarshals and invokes NvRmGetModuleInterfaceCapabilities.  A kernel-side
+ * bounce buffer of CapStructSize bytes is allocated, filled by the API call,
+ * and copied back to the client pointer p_in->pCaps.  The buffer contents
+ * are not copied in first, so pCaps is treated as out-only here.
+ * NOTE(review): CapStructSize comes from the (untrusted) client and is used
+ * directly as an allocation size with no upper bound or check against
+ * InSize/OutSize -- confirm the transport layer validates it.
+ */
+static NvError NvRmGetModuleInterfaceCapabilities_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmGetModuleInterfaceCapabilities_in *p_in;
+ NvRmGetModuleInterfaceCapabilities_out *p_out;
+ void* pCaps = NULL;
+
+ p_in = (NvRmGetModuleInterfaceCapabilities_in *)InBuffer;
+ p_out = (NvRmGetModuleInterfaceCapabilities_out *)((NvU8 *)OutBuffer + OFFSET(NvRmGetModuleInterfaceCapabilities_params, out) - OFFSET(NvRmGetModuleInterfaceCapabilities_params, inout));
+
+ if( p_in->CapStructSize && p_in->pCaps )
+ {
+ pCaps = (void* )NvOsAlloc( p_in->CapStructSize );
+ if( !pCaps )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmGetModuleInterfaceCapabilities( p_in->hRm, p_in->ModuleId, p_in->CapStructSize, pCaps );
+
+ /* Copy the filled capability struct back to the client's buffer. */
+ if(p_in->pCaps && pCaps)
+ {
+ err_ = NvOsCopyOut( p_in->pCaps, pCaps, p_in->CapStructSize );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pCaps );
+ return err_;
+}
+
+/*
+ * Unmarshals and invokes NvRmExternalClockConfig; all parameters are plain
+ * values in InBuffer, the NvU32 result is written to the out section.
+ */
+static NvError NvRmExternalClockConfig_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmExternalClockConfig_in *p_in;
+ NvRmExternalClockConfig_out *p_out;
+
+ p_in = (NvRmExternalClockConfig_in *)InBuffer;
+ p_out = (NvRmExternalClockConfig_out *)((NvU8 *)OutBuffer + OFFSET(NvRmExternalClockConfig_params, out) - OFFSET(NvRmExternalClockConfig_params, inout));
+
+
+ p_out->ret_ = NvRmExternalClockConfig( p_in->hDevice, p_in->IoModule, p_in->Instance, p_in->Config, p_in->EnableTristate );
+
+ return err_;
+}
+
+/*
+ * Unmarshals and invokes NvRmSetOdmModuleTristate (ODM module id/instance
+ * addressing); the NvError result is returned via the out section.
+ */
+static NvError NvRmSetOdmModuleTristate_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmSetOdmModuleTristate_in *p_in;
+ NvRmSetOdmModuleTristate_out *p_out;
+
+ p_in = (NvRmSetOdmModuleTristate_in *)InBuffer;
+ p_out = (NvRmSetOdmModuleTristate_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSetOdmModuleTristate_params, out) - OFFSET(NvRmSetOdmModuleTristate_params, inout));
+
+
+ p_out->ret_ = NvRmSetOdmModuleTristate( p_in->hDevice, p_in->OdmModule, p_in->OdmInstance, p_in->EnableTristate );
+
+ return err_;
+}
+
+/*
+ * Unmarshals and invokes NvRmSetModuleTristate (RM module id addressing);
+ * the NvError result is returned via the out section.
+ */
+static NvError NvRmSetModuleTristate_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmSetModuleTristate_in *p_in;
+ NvRmSetModuleTristate_out *p_out;
+
+ p_in = (NvRmSetModuleTristate_in *)InBuffer;
+ p_out = (NvRmSetModuleTristate_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSetModuleTristate_params, out) - OFFSET(NvRmSetModuleTristate_params, inout));
+
+
+ p_out->ret_ = NvRmSetModuleTristate( p_in->hDevice, p_in->RmModule, p_in->EnableTristate );
+
+ return err_;
+}
+
+/*
+ * Package-level demultiplexer for the nvrm_pinmux IDL package.  Routes the
+ * marshalled request to the stub selected by 'function'; returns
+ * NvError_BadParameter for an unknown function id.
+ */
+NvError nvrm_pinmux_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pinmux_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 4:
+ err_ = NvRmGetStraps_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = NvRmGetModuleInterfaceCapabilities_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmExternalClockConfig_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmSetOdmModuleTristate_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmSetModuleTristate_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pmu_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pmu_dispatch.c
new file mode 100644
index 000000000000..593d6e256bd7
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pmu_dispatch.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_pmu.h"
+
+/*
+ * Byte offset of member 'e' within struct type 's' (hand-rolled offsetof).
+ * Used to locate the 'out' section relative to OutBuffer, which points at
+ * the 'inout' section of the params record.
+ */
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * IDL-generated marshalling records for the nvrm_pmu package: one
+ * in/inout/out triple plus a _params aggregate per API function.
+ * Fields named pXxx in an _out struct are the storage behind the API's
+ * output pointer parameter of the same name.
+ */
+
+/* Marshalled parameters for NvRmPmuIsRtcInitialized. */
+typedef struct NvRmPmuIsRtcInitialized_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+} NV_ALIGN(4) NvRmPmuIsRtcInitialized_in;
+
+typedef struct NvRmPmuIsRtcInitialized_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuIsRtcInitialized_inout;
+
+typedef struct NvRmPmuIsRtcInitialized_out_t
+{
+ NvBool ret_;
+} NV_ALIGN(4) NvRmPmuIsRtcInitialized_out;
+
+typedef struct NvRmPmuIsRtcInitialized_params_t
+{
+ NvRmPmuIsRtcInitialized_in in;
+ NvRmPmuIsRtcInitialized_inout inout;
+ NvRmPmuIsRtcInitialized_out out;
+} NvRmPmuIsRtcInitialized_params;
+
+/* Marshalled parameters for NvRmPmuWriteRtc. */
+typedef struct NvRmPmuWriteRtc_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvU32 Count;
+} NV_ALIGN(4) NvRmPmuWriteRtc_in;
+
+typedef struct NvRmPmuWriteRtc_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuWriteRtc_inout;
+
+typedef struct NvRmPmuWriteRtc_out_t
+{
+ NvBool ret_;
+} NV_ALIGN(4) NvRmPmuWriteRtc_out;
+
+typedef struct NvRmPmuWriteRtc_params_t
+{
+ NvRmPmuWriteRtc_in in;
+ NvRmPmuWriteRtc_inout inout;
+ NvRmPmuWriteRtc_out out;
+} NvRmPmuWriteRtc_params;
+
+/* Marshalled parameters for NvRmPmuReadRtc. */
+typedef struct NvRmPmuReadRtc_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+} NV_ALIGN(4) NvRmPmuReadRtc_in;
+
+typedef struct NvRmPmuReadRtc_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuReadRtc_inout;
+
+typedef struct NvRmPmuReadRtc_out_t
+{
+ NvBool ret_;
+ NvU32 pCount;
+} NV_ALIGN(4) NvRmPmuReadRtc_out;
+
+typedef struct NvRmPmuReadRtc_params_t
+{
+ NvRmPmuReadRtc_in in;
+ NvRmPmuReadRtc_inout inout;
+ NvRmPmuReadRtc_out out;
+} NvRmPmuReadRtc_params;
+
+/* Marshalled parameters for NvRmPmuGetBatteryChemistry. */
+typedef struct NvRmPmuGetBatteryChemistry_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmPmuBatteryInstance batteryInst;
+} NV_ALIGN(4) NvRmPmuGetBatteryChemistry_in;
+
+typedef struct NvRmPmuGetBatteryChemistry_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetBatteryChemistry_inout;
+
+typedef struct NvRmPmuGetBatteryChemistry_out_t
+{
+ NvRmPmuBatteryChemistry pChemistry;
+} NV_ALIGN(4) NvRmPmuGetBatteryChemistry_out;
+
+typedef struct NvRmPmuGetBatteryChemistry_params_t
+{
+ NvRmPmuGetBatteryChemistry_in in;
+ NvRmPmuGetBatteryChemistry_inout inout;
+ NvRmPmuGetBatteryChemistry_out out;
+} NvRmPmuGetBatteryChemistry_params;
+
+/* Marshalled parameters for NvRmPmuGetBatteryFullLifeTime. */
+typedef struct NvRmPmuGetBatteryFullLifeTime_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmPmuBatteryInstance batteryInst;
+} NV_ALIGN(4) NvRmPmuGetBatteryFullLifeTime_in;
+
+typedef struct NvRmPmuGetBatteryFullLifeTime_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetBatteryFullLifeTime_inout;
+
+typedef struct NvRmPmuGetBatteryFullLifeTime_out_t
+{
+ NvU32 pLifeTime;
+} NV_ALIGN(4) NvRmPmuGetBatteryFullLifeTime_out;
+
+typedef struct NvRmPmuGetBatteryFullLifeTime_params_t
+{
+ NvRmPmuGetBatteryFullLifeTime_in in;
+ NvRmPmuGetBatteryFullLifeTime_inout inout;
+ NvRmPmuGetBatteryFullLifeTime_out out;
+} NvRmPmuGetBatteryFullLifeTime_params;
+
+/* Marshalled parameters for NvRmPmuGetBatteryData. */
+typedef struct NvRmPmuGetBatteryData_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmPmuBatteryInstance batteryInst;
+} NV_ALIGN(4) NvRmPmuGetBatteryData_in;
+
+typedef struct NvRmPmuGetBatteryData_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetBatteryData_inout;
+
+typedef struct NvRmPmuGetBatteryData_out_t
+{
+ NvBool ret_;
+ NvRmPmuBatteryData pData;
+} NV_ALIGN(4) NvRmPmuGetBatteryData_out;
+
+typedef struct NvRmPmuGetBatteryData_params_t
+{
+ NvRmPmuGetBatteryData_in in;
+ NvRmPmuGetBatteryData_inout inout;
+ NvRmPmuGetBatteryData_out out;
+} NvRmPmuGetBatteryData_params;
+
+/* Marshalled parameters for NvRmPmuGetBatteryStatus. */
+typedef struct NvRmPmuGetBatteryStatus_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmPmuBatteryInstance batteryInst;
+} NV_ALIGN(4) NvRmPmuGetBatteryStatus_in;
+
+typedef struct NvRmPmuGetBatteryStatus_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetBatteryStatus_inout;
+
+typedef struct NvRmPmuGetBatteryStatus_out_t
+{
+ NvBool ret_;
+ NvU8 pStatus;
+} NV_ALIGN(4) NvRmPmuGetBatteryStatus_out;
+
+typedef struct NvRmPmuGetBatteryStatus_params_t
+{
+ NvRmPmuGetBatteryStatus_in in;
+ NvRmPmuGetBatteryStatus_inout inout;
+ NvRmPmuGetBatteryStatus_out out;
+} NvRmPmuGetBatteryStatus_params;
+
+/* Marshalled parameters for NvRmPmuGetAcLineStatus. */
+typedef struct NvRmPmuGetAcLineStatus_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+} NV_ALIGN(4) NvRmPmuGetAcLineStatus_in;
+
+typedef struct NvRmPmuGetAcLineStatus_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetAcLineStatus_inout;
+
+typedef struct NvRmPmuGetAcLineStatus_out_t
+{
+ NvBool ret_;
+ NvRmPmuAcLineStatus pStatus;
+} NV_ALIGN(4) NvRmPmuGetAcLineStatus_out;
+
+typedef struct NvRmPmuGetAcLineStatus_params_t
+{
+ NvRmPmuGetAcLineStatus_in in;
+ NvRmPmuGetAcLineStatus_inout inout;
+ NvRmPmuGetAcLineStatus_out out;
+} NvRmPmuGetAcLineStatus_params;
+
+/* Marshalled parameters for NvRmPmuSetChargingCurrentLimit (no outputs). */
+typedef struct NvRmPmuSetChargingCurrentLimit_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDevice;
+ NvRmPmuChargingPath ChargingPath;
+ NvU32 ChargingCurrentLimitMa;
+ NvU32 ChargerType;
+} NV_ALIGN(4) NvRmPmuSetChargingCurrentLimit_in;
+
+typedef struct NvRmPmuSetChargingCurrentLimit_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuSetChargingCurrentLimit_inout;
+
+typedef struct NvRmPmuSetChargingCurrentLimit_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuSetChargingCurrentLimit_out;
+
+typedef struct NvRmPmuSetChargingCurrentLimit_params_t
+{
+ NvRmPmuSetChargingCurrentLimit_in in;
+ NvRmPmuSetChargingCurrentLimit_inout inout;
+ NvRmPmuSetChargingCurrentLimit_out out;
+} NvRmPmuSetChargingCurrentLimit_params;
+
+/* Marshalled parameters for NvRmPmuSetSocRailPowerState (no outputs). */
+typedef struct NvRmPmuSetSocRailPowerState_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 vddId;
+ NvBool Enable;
+} NV_ALIGN(4) NvRmPmuSetSocRailPowerState_in;
+
+typedef struct NvRmPmuSetSocRailPowerState_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuSetSocRailPowerState_inout;
+
+typedef struct NvRmPmuSetSocRailPowerState_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuSetSocRailPowerState_out;
+
+typedef struct NvRmPmuSetSocRailPowerState_params_t
+{
+ NvRmPmuSetSocRailPowerState_in in;
+ NvRmPmuSetSocRailPowerState_inout inout;
+ NvRmPmuSetSocRailPowerState_out out;
+} NvRmPmuSetSocRailPowerState_params;
+
+/* Marshalled parameters for NvRmPmuSetVoltage. */
+typedef struct NvRmPmuSetVoltage_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 vddId;
+ NvU32 MilliVolts;
+} NV_ALIGN(4) NvRmPmuSetVoltage_in;
+
+typedef struct NvRmPmuSetVoltage_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuSetVoltage_inout;
+
+typedef struct NvRmPmuSetVoltage_out_t
+{
+ NvU32 pSettleMicroSeconds;
+} NV_ALIGN(4) NvRmPmuSetVoltage_out;
+
+typedef struct NvRmPmuSetVoltage_params_t
+{
+ NvRmPmuSetVoltage_in in;
+ NvRmPmuSetVoltage_inout inout;
+ NvRmPmuSetVoltage_out out;
+} NvRmPmuSetVoltage_params;
+
+/* Marshalled parameters for NvRmPmuGetVoltage. */
+typedef struct NvRmPmuGetVoltage_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 vddId;
+} NV_ALIGN(4) NvRmPmuGetVoltage_in;
+
+typedef struct NvRmPmuGetVoltage_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetVoltage_inout;
+
+typedef struct NvRmPmuGetVoltage_out_t
+{
+ NvU32 pMilliVolts;
+} NV_ALIGN(4) NvRmPmuGetVoltage_out;
+
+typedef struct NvRmPmuGetVoltage_params_t
+{
+ NvRmPmuGetVoltage_in in;
+ NvRmPmuGetVoltage_inout inout;
+ NvRmPmuGetVoltage_out out;
+} NvRmPmuGetVoltage_params;
+
+/* Marshalled parameters for NvRmPmuGetCapabilities. */
+typedef struct NvRmPmuGetCapabilities_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hDevice;
+ NvU32 vddId;
+} NV_ALIGN(4) NvRmPmuGetCapabilities_in;
+
+typedef struct NvRmPmuGetCapabilities_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPmuGetCapabilities_inout;
+
+typedef struct NvRmPmuGetCapabilities_out_t
+{
+ NvRmPmuVddRailCapabilities pCapabilities;
+} NV_ALIGN(4) NvRmPmuGetCapabilities_out;
+
+typedef struct NvRmPmuGetCapabilities_params_t
+{
+ NvRmPmuGetCapabilities_in in;
+ NvRmPmuGetCapabilities_inout inout;
+ NvRmPmuGetCapabilities_out out;
+} NvRmPmuGetCapabilities_params;
+
+/*
+ * Unmarshals and invokes NvRmPmuIsRtcInitialized; the NvBool result is
+ * written to the out section located relative to OutBuffer (which points
+ * at the 'inout' section of the params record).
+ */
+static NvError NvRmPmuIsRtcInitialized_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuIsRtcInitialized_in *p_in;
+ NvRmPmuIsRtcInitialized_out *p_out;
+
+ p_in = (NvRmPmuIsRtcInitialized_in *)InBuffer;
+ p_out = (NvRmPmuIsRtcInitialized_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuIsRtcInitialized_params, out) - OFFSET(NvRmPmuIsRtcInitialized_params, inout));
+
+
+ p_out->ret_ = NvRmPmuIsRtcInitialized( p_in->hRmDevice );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuWriteRtc with the client-supplied count. */
+static NvError NvRmPmuWriteRtc_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuWriteRtc_in *p_in;
+ NvRmPmuWriteRtc_out *p_out;
+
+ p_in = (NvRmPmuWriteRtc_in *)InBuffer;
+ p_out = (NvRmPmuWriteRtc_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuWriteRtc_params, out) - OFFSET(NvRmPmuWriteRtc_params, inout));
+
+
+ p_out->ret_ = NvRmPmuWriteRtc( p_in->hRmDevice, p_in->Count );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuReadRtc; the count is returned in-place in
+ * the out section (p_out->pCount backs the API's output pointer). */
+static NvError NvRmPmuReadRtc_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuReadRtc_in *p_in;
+ NvRmPmuReadRtc_out *p_out;
+
+ p_in = (NvRmPmuReadRtc_in *)InBuffer;
+ p_out = (NvRmPmuReadRtc_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuReadRtc_params, out) - OFFSET(NvRmPmuReadRtc_params, inout));
+
+
+ p_out->ret_ = NvRmPmuReadRtc( p_in->hRmDevice, &p_out->pCount );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetBatteryChemistry (void API -- no ret_;
+ * only the chemistry value is returned via the out section). */
+static NvError NvRmPmuGetBatteryChemistry_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetBatteryChemistry_in *p_in;
+ NvRmPmuGetBatteryChemistry_out *p_out;
+
+ p_in = (NvRmPmuGetBatteryChemistry_in *)InBuffer;
+ p_out = (NvRmPmuGetBatteryChemistry_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetBatteryChemistry_params, out) - OFFSET(NvRmPmuGetBatteryChemistry_params, inout));
+
+
+ NvRmPmuGetBatteryChemistry( p_in->hRmDevice, p_in->batteryInst, &p_out->pChemistry );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetBatteryFullLifeTime (void API). */
+static NvError NvRmPmuGetBatteryFullLifeTime_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetBatteryFullLifeTime_in *p_in;
+ NvRmPmuGetBatteryFullLifeTime_out *p_out;
+
+ p_in = (NvRmPmuGetBatteryFullLifeTime_in *)InBuffer;
+ p_out = (NvRmPmuGetBatteryFullLifeTime_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetBatteryFullLifeTime_params, out) - OFFSET(NvRmPmuGetBatteryFullLifeTime_params, inout));
+
+
+ NvRmPmuGetBatteryFullLifeTime( p_in->hRmDevice, p_in->batteryInst, &p_out->pLifeTime );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetBatteryData; the NvRmPmuBatteryData
+ * struct is filled directly in the out section. */
+static NvError NvRmPmuGetBatteryData_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetBatteryData_in *p_in;
+ NvRmPmuGetBatteryData_out *p_out;
+
+ p_in = (NvRmPmuGetBatteryData_in *)InBuffer;
+ p_out = (NvRmPmuGetBatteryData_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetBatteryData_params, out) - OFFSET(NvRmPmuGetBatteryData_params, inout));
+
+
+ p_out->ret_ = NvRmPmuGetBatteryData( p_in->hRmDevice, p_in->batteryInst, &p_out->pData );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetBatteryStatus (NvU8 status byte out). */
+static NvError NvRmPmuGetBatteryStatus_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetBatteryStatus_in *p_in;
+ NvRmPmuGetBatteryStatus_out *p_out;
+
+ p_in = (NvRmPmuGetBatteryStatus_in *)InBuffer;
+ p_out = (NvRmPmuGetBatteryStatus_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetBatteryStatus_params, out) - OFFSET(NvRmPmuGetBatteryStatus_params, inout));
+
+
+ p_out->ret_ = NvRmPmuGetBatteryStatus( p_in->hRmDevice, p_in->batteryInst, &p_out->pStatus );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetAcLineStatus; status is returned via the
+ * out section of OutBuffer. */
+static NvError NvRmPmuGetAcLineStatus_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetAcLineStatus_in *p_in;
+ NvRmPmuGetAcLineStatus_out *p_out;
+
+ p_in = (NvRmPmuGetAcLineStatus_in *)InBuffer;
+ p_out = (NvRmPmuGetAcLineStatus_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetAcLineStatus_params, out) - OFFSET(NvRmPmuGetAcLineStatus_params, inout));
+
+
+ p_out->ret_ = NvRmPmuGetAcLineStatus( p_in->hRmDevice, &p_out->pStatus );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuSetChargingCurrentLimit.  Void API with no
+ * outputs, so OutBuffer is never dereferenced. */
+static NvError NvRmPmuSetChargingCurrentLimit_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuSetChargingCurrentLimit_in *p_in;
+
+ p_in = (NvRmPmuSetChargingCurrentLimit_in *)InBuffer;
+
+
+ NvRmPmuSetChargingCurrentLimit( p_in->hRmDevice, p_in->ChargingPath, p_in->ChargingCurrentLimitMa, p_in->ChargerType );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuSetSocRailPowerState.  Void API with no
+ * outputs, so OutBuffer is never dereferenced. */
+static NvError NvRmPmuSetSocRailPowerState_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuSetSocRailPowerState_in *p_in;
+
+ p_in = (NvRmPmuSetSocRailPowerState_in *)InBuffer;
+
+
+ NvRmPmuSetSocRailPowerState( p_in->hDevice, p_in->vddId, p_in->Enable );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuSetVoltage (void API); the settling time in
+ * microseconds is returned via the out section. */
+static NvError NvRmPmuSetVoltage_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuSetVoltage_in *p_in;
+ NvRmPmuSetVoltage_out *p_out;
+
+ p_in = (NvRmPmuSetVoltage_in *)InBuffer;
+ p_out = (NvRmPmuSetVoltage_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuSetVoltage_params, out) - OFFSET(NvRmPmuSetVoltage_params, inout));
+
+
+ NvRmPmuSetVoltage( p_in->hDevice, p_in->vddId, p_in->MilliVolts, &p_out->pSettleMicroSeconds );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetVoltage (void API); millivolt reading is
+ * returned via the out section. */
+static NvError NvRmPmuGetVoltage_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetVoltage_in *p_in;
+ NvRmPmuGetVoltage_out *p_out;
+
+ p_in = (NvRmPmuGetVoltage_in *)InBuffer;
+ p_out = (NvRmPmuGetVoltage_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetVoltage_params, out) - OFFSET(NvRmPmuGetVoltage_params, inout));
+
+
+ NvRmPmuGetVoltage( p_in->hDevice, p_in->vddId, &p_out->pMilliVolts );
+
+ return err_;
+}
+
+/* Unmarshals and invokes NvRmPmuGetCapabilities (void API); the capability
+ * struct is filled directly in the out section. */
+static NvError NvRmPmuGetCapabilities_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmPmuGetCapabilities_in *p_in;
+ NvRmPmuGetCapabilities_out *p_out;
+
+ p_in = (NvRmPmuGetCapabilities_in *)InBuffer;
+ p_out = (NvRmPmuGetCapabilities_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPmuGetCapabilities_params, out) - OFFSET(NvRmPmuGetCapabilities_params, inout));
+
+
+ NvRmPmuGetCapabilities( p_in->hDevice, p_in->vddId, &p_out->pCapabilities );
+
+ return err_;
+}
+
+/*
+ * Package-level demultiplexer for the nvrm_pmu IDL package.  Routes the
+ * marshalled request to the stub selected by 'function'; returns
+ * NvError_BadParameter for an unknown function id.
+ */
+NvError nvrm_pmu_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_pmu_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+
+ switch( function ) {
+ case 12:
+ err_ = NvRmPmuIsRtcInitialized_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 11:
+ err_ = NvRmPmuWriteRtc_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 10:
+ err_ = NvRmPmuReadRtc_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 9:
+ err_ = NvRmPmuGetBatteryChemistry_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 8:
+ err_ = NvRmPmuGetBatteryFullLifeTime_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 7:
+ err_ = NvRmPmuGetBatteryData_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 6:
+ err_ = NvRmPmuGetBatteryStatus_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 5:
+ err_ = NvRmPmuGetAcLineStatus_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 4:
+ err_ = NvRmPmuSetChargingCurrentLimit_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 3:
+ err_ = NvRmPmuSetSocRailPowerState_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 2:
+ err_ = NvRmPmuSetVoltage_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 1:
+ err_ = NvRmPmuGetVoltage_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ case 0:
+ err_ = NvRmPmuGetCapabilities_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+ break;
+ default:
+ err_ = NvError_BadParameter;
+ break;
+ }
+
+ return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_power_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_power_dispatch.c
new file mode 100644
index 000000000000..cf70ec5a9b45
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_power_dispatch.c
@@ -0,0 +1,1916 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_power.h"
+
+// Byte offset of member 'e' within struct 's', via the classic null-pointer
+// cast idiom. NOTE(review): equivalent to offsetof(s, e) from <stddef.h>;
+// the null-deref form is technically undefined behavior in ISO C, though it
+// is the pattern this generated code relies on throughout.
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/*
+ * Auto-generated IDL marshaling records for the nvrm_power dispatcher.
+ * Each RM entry point below gets four structs:
+ *   *_in     - client arguments, preceded by the package_/function_ selector
+ *              words that route the call;
+ *   *_inout  - arguments passed in both directions (a dummy_ word when none);
+ *   *_out    - the return value (ret_) and any output parameters;
+ *   *_params - the three records concatenated; the dispatch stubs use
+ *              OFFSET() arithmetic on this layout to locate the out section.
+ * Pointer-typed fields carry client-side addresses; the stubs below copy
+ * list contents through kernel temporaries (see NvOsCopyOut usage) rather
+ * than dereferencing them directly.
+ */
+typedef struct NvRmKernelPowerResume_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmKernelPowerResume_in;
+
+typedef struct NvRmKernelPowerResume_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmKernelPowerResume_inout;
+
+typedef struct NvRmKernelPowerResume_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmKernelPowerResume_out;
+
+typedef struct NvRmKernelPowerResume_params_t
+{
+ NvRmKernelPowerResume_in in;
+ NvRmKernelPowerResume_inout inout;
+ NvRmKernelPowerResume_out out;
+} NvRmKernelPowerResume_params;
+
+typedef struct NvRmKernelPowerSuspend_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmKernelPowerSuspend_in;
+
+typedef struct NvRmKernelPowerSuspend_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmKernelPowerSuspend_inout;
+
+typedef struct NvRmKernelPowerSuspend_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmKernelPowerSuspend_out;
+
+typedef struct NvRmKernelPowerSuspend_params_t
+{
+ NvRmKernelPowerSuspend_in in;
+ NvRmKernelPowerSuspend_inout inout;
+ NvRmKernelPowerSuspend_out out;
+} NvRmKernelPowerSuspend_params;
+
+typedef struct NvRmDfsSetLowVoltageThreshold_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsVoltageRailId RailId;
+ NvRmMilliVolts LowMv;
+} NV_ALIGN(4) NvRmDfsSetLowVoltageThreshold_in;
+
+typedef struct NvRmDfsSetLowVoltageThreshold_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetLowVoltageThreshold_inout;
+
+typedef struct NvRmDfsSetLowVoltageThreshold_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetLowVoltageThreshold_out;
+
+typedef struct NvRmDfsSetLowVoltageThreshold_params_t
+{
+ NvRmDfsSetLowVoltageThreshold_in in;
+ NvRmDfsSetLowVoltageThreshold_inout inout;
+ NvRmDfsSetLowVoltageThreshold_out out;
+} NvRmDfsSetLowVoltageThreshold_params;
+
+typedef struct NvRmDfsGetLowVoltageThreshold_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsVoltageRailId RailId;
+} NV_ALIGN(4) NvRmDfsGetLowVoltageThreshold_in;
+
+typedef struct NvRmDfsGetLowVoltageThreshold_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsGetLowVoltageThreshold_inout;
+
+typedef struct NvRmDfsGetLowVoltageThreshold_out_t
+{
+ NvRmMilliVolts pLowMv;
+ NvRmMilliVolts pPresentMv;
+} NV_ALIGN(4) NvRmDfsGetLowVoltageThreshold_out;
+
+typedef struct NvRmDfsGetLowVoltageThreshold_params_t
+{
+ NvRmDfsGetLowVoltageThreshold_in in;
+ NvRmDfsGetLowVoltageThreshold_inout inout;
+ NvRmDfsGetLowVoltageThreshold_out out;
+} NvRmDfsGetLowVoltageThreshold_params;
+
+typedef struct NvRmDfsLogBusyGetEntry_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 EntryIndex;
+} NV_ALIGN(4) NvRmDfsLogBusyGetEntry_in;
+
+typedef struct NvRmDfsLogBusyGetEntry_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogBusyGetEntry_inout;
+
+typedef struct NvRmDfsLogBusyGetEntry_out_t
+{
+ NvError ret_;
+ NvU32 pSampleIndex;
+ NvU32 pClientId;
+ NvU32 pClientTag;
+ NvRmDfsBusyHint pBusyHint;
+} NV_ALIGN(4) NvRmDfsLogBusyGetEntry_out;
+
+typedef struct NvRmDfsLogBusyGetEntry_params_t
+{
+ NvRmDfsLogBusyGetEntry_in in;
+ NvRmDfsLogBusyGetEntry_inout inout;
+ NvRmDfsLogBusyGetEntry_out out;
+} NvRmDfsLogBusyGetEntry_params;
+
+typedef struct NvRmDfsLogStarvationGetEntry_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 EntryIndex;
+} NV_ALIGN(4) NvRmDfsLogStarvationGetEntry_in;
+
+typedef struct NvRmDfsLogStarvationGetEntry_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogStarvationGetEntry_inout;
+
+typedef struct NvRmDfsLogStarvationGetEntry_out_t
+{
+ NvError ret_;
+ NvU32 pSampleIndex;
+ NvU32 pClientId;
+ NvU32 pClientTag;
+ NvRmDfsStarvationHint pStarvationHint;
+} NV_ALIGN(4) NvRmDfsLogStarvationGetEntry_out;
+
+typedef struct NvRmDfsLogStarvationGetEntry_params_t
+{
+ NvRmDfsLogStarvationGetEntry_in in;
+ NvRmDfsLogStarvationGetEntry_inout inout;
+ NvRmDfsLogStarvationGetEntry_out out;
+} NvRmDfsLogStarvationGetEntry_params;
+
+typedef struct NvRmDfsLogActivityGetEntry_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 EntryIndex;
+ NvU32 LogDomainsCount;
+ NvU32 * pActiveCyclesList;
+ NvRmFreqKHz * pAveragesList;
+ NvRmFreqKHz * pFrequenciesList;
+} NV_ALIGN(4) NvRmDfsLogActivityGetEntry_in;
+
+typedef struct NvRmDfsLogActivityGetEntry_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogActivityGetEntry_inout;
+
+typedef struct NvRmDfsLogActivityGetEntry_out_t
+{
+ NvError ret_;
+ NvU32 pIntervalMs;
+ NvU32 pLp2TimeMs;
+} NV_ALIGN(4) NvRmDfsLogActivityGetEntry_out;
+
+typedef struct NvRmDfsLogActivityGetEntry_params_t
+{
+ NvRmDfsLogActivityGetEntry_in in;
+ NvRmDfsLogActivityGetEntry_inout inout;
+ NvRmDfsLogActivityGetEntry_out out;
+} NvRmDfsLogActivityGetEntry_params;
+
+typedef struct NvRmDfsLogGetMeanFrequencies_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 LogMeanFreqListCount;
+ NvRmFreqKHz * pLogMeanFreqList;
+} NV_ALIGN(4) NvRmDfsLogGetMeanFrequencies_in;
+
+typedef struct NvRmDfsLogGetMeanFrequencies_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogGetMeanFrequencies_inout;
+
+typedef struct NvRmDfsLogGetMeanFrequencies_out_t
+{
+ NvError ret_;
+ NvU32 pLogLp2TimeMs;
+ NvU32 pLogLp2Entries;
+} NV_ALIGN(4) NvRmDfsLogGetMeanFrequencies_out;
+
+typedef struct NvRmDfsLogGetMeanFrequencies_params_t
+{
+ NvRmDfsLogGetMeanFrequencies_in in;
+ NvRmDfsLogGetMeanFrequencies_inout inout;
+ NvRmDfsLogGetMeanFrequencies_out out;
+} NvRmDfsLogGetMeanFrequencies_params;
+
+typedef struct NvRmDfsLogStart_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmDfsLogStart_in;
+
+typedef struct NvRmDfsLogStart_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogStart_inout;
+
+typedef struct NvRmDfsLogStart_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsLogStart_out;
+
+typedef struct NvRmDfsLogStart_params_t
+{
+ NvRmDfsLogStart_in in;
+ NvRmDfsLogStart_inout inout;
+ NvRmDfsLogStart_out out;
+} NvRmDfsLogStart_params;
+
+typedef struct NvRmDfsGetProfileData_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 DfsProfileCount;
+ NvU32 * pSamplesNoList;
+ NvU32 * pProfileTimeUsList;
+} NV_ALIGN(4) NvRmDfsGetProfileData_in;
+
+typedef struct NvRmDfsGetProfileData_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsGetProfileData_inout;
+
+typedef struct NvRmDfsGetProfileData_out_t
+{
+ NvError ret_;
+ NvU32 pDfsPeriodUs;
+} NV_ALIGN(4) NvRmDfsGetProfileData_out;
+
+typedef struct NvRmDfsGetProfileData_params_t
+{
+ NvRmDfsGetProfileData_in in;
+ NvRmDfsGetProfileData_inout inout;
+ NvRmDfsGetProfileData_out out;
+} NvRmDfsGetProfileData_params;
+
+typedef struct NvRmDfsSetAvHighCorner_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmFreqKHz DfsSystemHighKHz;
+ NvRmFreqKHz DfsAvpHighKHz;
+ NvRmFreqKHz DfsVpipeHighKHz;
+} NV_ALIGN(4) NvRmDfsSetAvHighCorner_in;
+
+typedef struct NvRmDfsSetAvHighCorner_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetAvHighCorner_inout;
+
+typedef struct NvRmDfsSetAvHighCorner_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetAvHighCorner_out;
+
+typedef struct NvRmDfsSetAvHighCorner_params_t
+{
+ NvRmDfsSetAvHighCorner_in in;
+ NvRmDfsSetAvHighCorner_inout inout;
+ NvRmDfsSetAvHighCorner_out out;
+} NvRmDfsSetAvHighCorner_params;
+
+typedef struct NvRmDfsSetCpuEmcHighCorner_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmFreqKHz DfsCpuHighKHz;
+ NvRmFreqKHz DfsEmcHighKHz;
+} NV_ALIGN(4) NvRmDfsSetCpuEmcHighCorner_in;
+
+typedef struct NvRmDfsSetCpuEmcHighCorner_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetCpuEmcHighCorner_inout;
+
+typedef struct NvRmDfsSetCpuEmcHighCorner_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetCpuEmcHighCorner_out;
+
+typedef struct NvRmDfsSetCpuEmcHighCorner_params_t
+{
+ NvRmDfsSetCpuEmcHighCorner_in in;
+ NvRmDfsSetCpuEmcHighCorner_inout inout;
+ NvRmDfsSetCpuEmcHighCorner_out out;
+} NvRmDfsSetCpuEmcHighCorner_params;
+
+typedef struct NvRmDfsSetEmcEnvelope_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmFreqKHz DfsEmcLowCornerKHz;
+ NvRmFreqKHz DfsEmcHighCornerKHz;
+} NV_ALIGN(4) NvRmDfsSetEmcEnvelope_in;
+
+typedef struct NvRmDfsSetEmcEnvelope_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetEmcEnvelope_inout;
+
+typedef struct NvRmDfsSetEmcEnvelope_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetEmcEnvelope_out;
+
+typedef struct NvRmDfsSetEmcEnvelope_params_t
+{
+ NvRmDfsSetEmcEnvelope_in in;
+ NvRmDfsSetEmcEnvelope_inout inout;
+ NvRmDfsSetEmcEnvelope_out out;
+} NvRmDfsSetEmcEnvelope_params;
+
+typedef struct NvRmDfsSetCpuEnvelope_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmFreqKHz DfsCpuLowCornerKHz;
+ NvRmFreqKHz DfsCpuHighCornerKHz;
+} NV_ALIGN(4) NvRmDfsSetCpuEnvelope_in;
+
+typedef struct NvRmDfsSetCpuEnvelope_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetCpuEnvelope_inout;
+
+typedef struct NvRmDfsSetCpuEnvelope_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetCpuEnvelope_out;
+
+typedef struct NvRmDfsSetCpuEnvelope_params_t
+{
+ NvRmDfsSetCpuEnvelope_in in;
+ NvRmDfsSetCpuEnvelope_inout inout;
+ NvRmDfsSetCpuEnvelope_out out;
+} NvRmDfsSetCpuEnvelope_params;
+
+typedef struct NvRmDfsSetTarget_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 DfsFreqListCount;
+ NvRmFreqKHz * pDfsTargetFreqList;
+} NV_ALIGN(4) NvRmDfsSetTarget_in;
+
+typedef struct NvRmDfsSetTarget_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetTarget_inout;
+
+typedef struct NvRmDfsSetTarget_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetTarget_out;
+
+typedef struct NvRmDfsSetTarget_params_t
+{
+ NvRmDfsSetTarget_in in;
+ NvRmDfsSetTarget_inout inout;
+ NvRmDfsSetTarget_out out;
+} NvRmDfsSetTarget_params;
+
+typedef struct NvRmDfsSetLowCorner_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 DfsFreqListCount;
+ NvRmFreqKHz * pDfsLowFreqList;
+} NV_ALIGN(4) NvRmDfsSetLowCorner_in;
+
+typedef struct NvRmDfsSetLowCorner_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetLowCorner_inout;
+
+typedef struct NvRmDfsSetLowCorner_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetLowCorner_out;
+
+typedef struct NvRmDfsSetLowCorner_params_t
+{
+ NvRmDfsSetLowCorner_in in;
+ NvRmDfsSetLowCorner_inout inout;
+ NvRmDfsSetLowCorner_out out;
+} NvRmDfsSetLowCorner_params;
+
+typedef struct NvRmDfsSetState_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsRunState NewDfsRunState;
+} NV_ALIGN(4) NvRmDfsSetState_in;
+
+typedef struct NvRmDfsSetState_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsSetState_inout;
+
+typedef struct NvRmDfsSetState_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmDfsSetState_out;
+
+typedef struct NvRmDfsSetState_params_t
+{
+ NvRmDfsSetState_in in;
+ NvRmDfsSetState_inout inout;
+ NvRmDfsSetState_out out;
+} NvRmDfsSetState_params;
+
+typedef struct NvRmDfsGetClockUtilization_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsClockId ClockId;
+} NV_ALIGN(4) NvRmDfsGetClockUtilization_in;
+
+typedef struct NvRmDfsGetClockUtilization_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsGetClockUtilization_inout;
+
+typedef struct NvRmDfsGetClockUtilization_out_t
+{
+ NvError ret_;
+ NvRmDfsClockUsage pClockUsage;
+} NV_ALIGN(4) NvRmDfsGetClockUtilization_out;
+
+typedef struct NvRmDfsGetClockUtilization_params_t
+{
+ NvRmDfsGetClockUtilization_in in;
+ NvRmDfsGetClockUtilization_inout inout;
+ NvRmDfsGetClockUtilization_out out;
+} NvRmDfsGetClockUtilization_params;
+
+typedef struct NvRmDfsGetState_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmDfsGetState_in;
+
+typedef struct NvRmDfsGetState_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmDfsGetState_inout;
+
+typedef struct NvRmDfsGetState_out_t
+{
+ NvRmDfsRunState ret_;
+} NV_ALIGN(4) NvRmDfsGetState_out;
+
+typedef struct NvRmDfsGetState_params_t
+{
+ NvRmDfsGetState_in in;
+ NvRmDfsGetState_inout inout;
+ NvRmDfsGetState_out out;
+} NvRmDfsGetState_params;
+
+typedef struct NvRmPowerActivityHint_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID ModuleId;
+ NvU32 ClientId;
+ NvU32 ActivityDurationMs;
+} NV_ALIGN(4) NvRmPowerActivityHint_in;
+
+typedef struct NvRmPowerActivityHint_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerActivityHint_inout;
+
+typedef struct NvRmPowerActivityHint_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerActivityHint_out;
+
+typedef struct NvRmPowerActivityHint_params_t
+{
+ NvRmPowerActivityHint_in in;
+ NvRmPowerActivityHint_inout inout;
+ NvRmPowerActivityHint_out out;
+} NvRmPowerActivityHint_params;
+
+typedef struct NvRmPowerStarvationHintMulti_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 ClientId;
+ NvRmDfsStarvationHint * pMultiHint;
+ NvU32 NumHints;
+} NV_ALIGN(4) NvRmPowerStarvationHintMulti_in;
+
+typedef struct NvRmPowerStarvationHintMulti_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerStarvationHintMulti_inout;
+
+typedef struct NvRmPowerStarvationHintMulti_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerStarvationHintMulti_out;
+
+typedef struct NvRmPowerStarvationHintMulti_params_t
+{
+ NvRmPowerStarvationHintMulti_in in;
+ NvRmPowerStarvationHintMulti_inout inout;
+ NvRmPowerStarvationHintMulti_out out;
+} NvRmPowerStarvationHintMulti_params;
+
+typedef struct NvRmPowerStarvationHint_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsClockId ClockId;
+ NvU32 ClientId;
+ NvBool Starving;
+} NV_ALIGN(4) NvRmPowerStarvationHint_in;
+
+typedef struct NvRmPowerStarvationHint_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerStarvationHint_inout;
+
+typedef struct NvRmPowerStarvationHint_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerStarvationHint_out;
+
+typedef struct NvRmPowerStarvationHint_params_t
+{
+ NvRmPowerStarvationHint_in in;
+ NvRmPowerStarvationHint_inout inout;
+ NvRmPowerStarvationHint_out out;
+} NvRmPowerStarvationHint_params;
+
+typedef struct NvRmPowerBusyHintMulti_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 ClientId;
+ NvRmDfsBusyHint * pMultiHint;
+ NvU32 NumHints;
+ NvRmDfsBusyHintSyncMode Mode;
+} NV_ALIGN(4) NvRmPowerBusyHintMulti_in;
+
+typedef struct NvRmPowerBusyHintMulti_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerBusyHintMulti_inout;
+
+typedef struct NvRmPowerBusyHintMulti_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerBusyHintMulti_out;
+
+typedef struct NvRmPowerBusyHintMulti_params_t
+{
+ NvRmPowerBusyHintMulti_in in;
+ NvRmPowerBusyHintMulti_inout inout;
+ NvRmPowerBusyHintMulti_out out;
+} NvRmPowerBusyHintMulti_params;
+
+typedef struct NvRmPowerBusyHint_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmDfsClockId ClockId;
+ NvU32 ClientId;
+ NvU32 BoostDurationMs;
+ NvRmFreqKHz BoostKHz;
+} NV_ALIGN(4) NvRmPowerBusyHint_in;
+
+typedef struct NvRmPowerBusyHint_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerBusyHint_inout;
+
+typedef struct NvRmPowerBusyHint_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerBusyHint_out;
+
+typedef struct NvRmPowerBusyHint_params_t
+{
+ NvRmPowerBusyHint_in in;
+ NvRmPowerBusyHint_inout inout;
+ NvRmPowerBusyHint_out out;
+} NvRmPowerBusyHint_params;
+
+typedef struct NvRmListPowerAwareModules_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID * pIdList;
+ NvBool * pActiveList;
+} NV_ALIGN(4) NvRmListPowerAwareModules_in;
+
+typedef struct NvRmListPowerAwareModules_inout_t
+{
+ NvU32 pListSize;
+} NV_ALIGN(4) NvRmListPowerAwareModules_inout;
+
+typedef struct NvRmListPowerAwareModules_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmListPowerAwareModules_out;
+
+typedef struct NvRmListPowerAwareModules_params_t
+{
+ NvRmListPowerAwareModules_in in;
+ NvRmListPowerAwareModules_inout inout;
+ NvRmListPowerAwareModules_out out;
+} NvRmListPowerAwareModules_params;
+
+typedef struct NvRmPowerVoltageControl_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID ModuleId;
+ NvU32 ClientId;
+ NvRmMilliVolts MinVolts;
+ NvRmMilliVolts MaxVolts;
+ NvRmMilliVolts * PrefVoltageList;
+ NvU32 PrefVoltageListCount;
+} NV_ALIGN(4) NvRmPowerVoltageControl_in;
+
+typedef struct NvRmPowerVoltageControl_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerVoltageControl_inout;
+
+typedef struct NvRmPowerVoltageControl_out_t
+{
+ NvError ret_;
+ NvRmMilliVolts CurrentVolts;
+} NV_ALIGN(4) NvRmPowerVoltageControl_out;
+
+typedef struct NvRmPowerVoltageControl_params_t
+{
+ NvRmPowerVoltageControl_in in;
+ NvRmPowerVoltageControl_inout inout;
+ NvRmPowerVoltageControl_out out;
+} NvRmPowerVoltageControl_params;
+
+typedef struct NvRmPowerModuleClockControl_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID ModuleId;
+ NvU32 ClientId;
+ NvBool Enable;
+} NV_ALIGN(4) NvRmPowerModuleClockControl_in;
+
+typedef struct NvRmPowerModuleClockControl_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerModuleClockControl_inout;
+
+typedef struct NvRmPowerModuleClockControl_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerModuleClockControl_out;
+
+typedef struct NvRmPowerModuleClockControl_params_t
+{
+ NvRmPowerModuleClockControl_in in;
+ NvRmPowerModuleClockControl_inout inout;
+ NvRmPowerModuleClockControl_out out;
+} NvRmPowerModuleClockControl_params;
+
+typedef struct NvRmPowerModuleClockConfig_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID ModuleId;
+ NvU32 ClientId;
+ NvRmFreqKHz MinFreq;
+ NvRmFreqKHz MaxFreq;
+ NvRmFreqKHz * PrefFreqList;
+ NvU32 PrefFreqListCount;
+ NvU32 flags;
+} NV_ALIGN(4) NvRmPowerModuleClockConfig_in;
+
+typedef struct NvRmPowerModuleClockConfig_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerModuleClockConfig_inout;
+
+typedef struct NvRmPowerModuleClockConfig_out_t
+{
+ NvError ret_;
+ NvRmFreqKHz CurrentFreq;
+} NV_ALIGN(4) NvRmPowerModuleClockConfig_out;
+
+typedef struct NvRmPowerModuleClockConfig_params_t
+{
+ NvRmPowerModuleClockConfig_in in;
+ NvRmPowerModuleClockConfig_inout inout;
+ NvRmPowerModuleClockConfig_out out;
+} NvRmPowerModuleClockConfig_params;
+
+typedef struct NvRmPowerModuleGetMaxFrequency_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmModuleID ModuleId;
+} NV_ALIGN(4) NvRmPowerModuleGetMaxFrequency_in;
+
+typedef struct NvRmPowerModuleGetMaxFrequency_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerModuleGetMaxFrequency_inout;
+
+typedef struct NvRmPowerModuleGetMaxFrequency_out_t
+{
+ NvRmFreqKHz ret_;
+} NV_ALIGN(4) NvRmPowerModuleGetMaxFrequency_out;
+
+typedef struct NvRmPowerModuleGetMaxFrequency_params_t
+{
+ NvRmPowerModuleGetMaxFrequency_in in;
+ NvRmPowerModuleGetMaxFrequency_inout inout;
+ NvRmPowerModuleGetMaxFrequency_out out;
+} NvRmPowerModuleGetMaxFrequency_params;
+
+typedef struct NvRmPowerGetPrimaryFrequency_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmPowerGetPrimaryFrequency_in;
+
+typedef struct NvRmPowerGetPrimaryFrequency_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerGetPrimaryFrequency_inout;
+
+typedef struct NvRmPowerGetPrimaryFrequency_out_t
+{
+ NvRmFreqKHz ret_;
+} NV_ALIGN(4) NvRmPowerGetPrimaryFrequency_out;
+
+typedef struct NvRmPowerGetPrimaryFrequency_params_t
+{
+ NvRmPowerGetPrimaryFrequency_in in;
+ NvRmPowerGetPrimaryFrequency_inout inout;
+ NvRmPowerGetPrimaryFrequency_out out;
+} NvRmPowerGetPrimaryFrequency_params;
+
+typedef struct NvRmPowerGetState_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+} NV_ALIGN(4) NvRmPowerGetState_in;
+
+typedef struct NvRmPowerGetState_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerGetState_inout;
+
+typedef struct NvRmPowerGetState_out_t
+{
+ NvError ret_;
+ NvRmPowerState pState;
+} NV_ALIGN(4) NvRmPowerGetState_out;
+
+typedef struct NvRmPowerGetState_params_t
+{
+ NvRmPowerGetState_in in;
+ NvRmPowerGetState_inout inout;
+ NvRmPowerGetState_out out;
+} NvRmPowerGetState_params;
+
+typedef struct NvRmPowerEventNotify_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvRmPowerEvent Event;
+} NV_ALIGN(4) NvRmPowerEventNotify_in;
+
+typedef struct NvRmPowerEventNotify_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerEventNotify_inout;
+
+typedef struct NvRmPowerEventNotify_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerEventNotify_out;
+
+typedef struct NvRmPowerEventNotify_params_t
+{
+ NvRmPowerEventNotify_in in;
+ NvRmPowerEventNotify_inout inout;
+ NvRmPowerEventNotify_out out;
+} NvRmPowerEventNotify_params;
+
+typedef struct NvRmPowerGetEvent_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 ClientId;
+} NV_ALIGN(4) NvRmPowerGetEvent_in;
+
+typedef struct NvRmPowerGetEvent_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerGetEvent_inout;
+
+typedef struct NvRmPowerGetEvent_out_t
+{
+ NvError ret_;
+ NvRmPowerEvent pEvent;
+} NV_ALIGN(4) NvRmPowerGetEvent_out;
+
+typedef struct NvRmPowerGetEvent_params_t
+{
+ NvRmPowerGetEvent_in in;
+ NvRmPowerGetEvent_inout inout;
+ NvRmPowerGetEvent_out out;
+} NvRmPowerGetEvent_params;
+
+typedef struct NvRmPowerUnRegister_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvU32 ClientId;
+} NV_ALIGN(4) NvRmPowerUnRegister_in;
+
+typedef struct NvRmPowerUnRegister_inout_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerUnRegister_inout;
+
+typedef struct NvRmPowerUnRegister_out_t
+{
+ NvU32 dummy_;
+} NV_ALIGN(4) NvRmPowerUnRegister_out;
+
+typedef struct NvRmPowerUnRegister_params_t
+{
+ NvRmPowerUnRegister_in in;
+ NvRmPowerUnRegister_inout inout;
+ NvRmPowerUnRegister_out out;
+} NvRmPowerUnRegister_params;
+
+typedef struct NvRmPowerRegister_in_t
+{
+ NvU32 package_;
+ NvU32 function_;
+ NvRmDeviceHandle hRmDeviceHandle;
+ NvOsSemaphoreHandle hEventSemaphore;
+} NV_ALIGN(4) NvRmPowerRegister_in;
+
+typedef struct NvRmPowerRegister_inout_t
+{
+ NvU32 pClientId;
+} NV_ALIGN(4) NvRmPowerRegister_inout;
+
+typedef struct NvRmPowerRegister_out_t
+{
+ NvError ret_;
+} NV_ALIGN(4) NvRmPowerRegister_out;
+
+typedef struct NvRmPowerRegister_params_t
+{
+ NvRmPowerRegister_in in;
+ NvRmPowerRegister_inout inout;
+ NvRmPowerRegister_out out;
+} NvRmPowerRegister_params;
+// Dispatch stub: unmarshals the device handle and forwards to
+// NvRmKernelPowerResume, storing its NvError in the out record.
+static NvError NvRmKernelPowerResume_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmKernelPowerResume_in *p_in;
+ NvRmKernelPowerResume_out *p_out;
+
+ p_in = (NvRmKernelPowerResume_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmKernelPowerResume_out *)((NvU8 *)OutBuffer + OFFSET(NvRmKernelPowerResume_params, out) - OFFSET(NvRmKernelPowerResume_params, inout));
+
+
+ p_out->ret_ = NvRmKernelPowerResume( p_in->hRmDeviceHandle );
+
+ return err_;
+}
+
+// Dispatch stub: unmarshals the device handle and forwards to
+// NvRmKernelPowerSuspend, storing its NvError in the out record.
+static NvError NvRmKernelPowerSuspend_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmKernelPowerSuspend_in *p_in;
+ NvRmKernelPowerSuspend_out *p_out;
+
+ p_in = (NvRmKernelPowerSuspend_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmKernelPowerSuspend_out *)((NvU8 *)OutBuffer + OFFSET(NvRmKernelPowerSuspend_params, out) - OFFSET(NvRmKernelPowerSuspend_params, inout));
+
+
+ p_out->ret_ = NvRmKernelPowerSuspend( p_in->hRmDeviceHandle );
+
+ return err_;
+}
+
+// Dispatch stub: forwards rail id and threshold to
+// NvRmDfsSetLowVoltageThreshold (void call; no out record is written).
+static NvError NvRmDfsSetLowVoltageThreshold_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsSetLowVoltageThreshold_in *p_in;
+
+ p_in = (NvRmDfsSetLowVoltageThreshold_in *)InBuffer;
+
+
+ NvRmDfsSetLowVoltageThreshold( p_in->hRmDeviceHandle, p_in->RailId, p_in->LowMv );
+
+ return err_;
+}
+
+// Dispatch stub: queries NvRmDfsGetLowVoltageThreshold; the two output
+// voltages are written straight into the out record for copy-back.
+static NvError NvRmDfsGetLowVoltageThreshold_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsGetLowVoltageThreshold_in *p_in;
+ NvRmDfsGetLowVoltageThreshold_out *p_out;
+
+ p_in = (NvRmDfsGetLowVoltageThreshold_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsGetLowVoltageThreshold_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsGetLowVoltageThreshold_params, out) - OFFSET(NvRmDfsGetLowVoltageThreshold_params, inout));
+
+
+ NvRmDfsGetLowVoltageThreshold( p_in->hRmDeviceHandle, p_in->RailId, &p_out->pLowMv, &p_out->pPresentMv );
+
+ return err_;
+}
+
+// Dispatch stub: fetches one busy-hint log entry; all outputs (sample index,
+// client id/tag, hint record) land directly in the out record.
+static NvError NvRmDfsLogBusyGetEntry_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsLogBusyGetEntry_in *p_in;
+ NvRmDfsLogBusyGetEntry_out *p_out;
+
+ p_in = (NvRmDfsLogBusyGetEntry_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsLogBusyGetEntry_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsLogBusyGetEntry_params, out) - OFFSET(NvRmDfsLogBusyGetEntry_params, inout));
+
+
+ p_out->ret_ = NvRmDfsLogBusyGetEntry( p_in->hRmDeviceHandle, p_in->EntryIndex, &p_out->pSampleIndex, &p_out->pClientId, &p_out->pClientTag, &p_out->pBusyHint );
+
+ return err_;
+}
+
+// Dispatch stub: fetches one starvation-hint log entry; all outputs land
+// directly in the out record.
+static NvError NvRmDfsLogStarvationGetEntry_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsLogStarvationGetEntry_in *p_in;
+ NvRmDfsLogStarvationGetEntry_out *p_out;
+
+ p_in = (NvRmDfsLogStarvationGetEntry_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsLogStarvationGetEntry_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsLogStarvationGetEntry_params, out) - OFFSET(NvRmDfsLogStarvationGetEntry_params, inout));
+
+
+ p_out->ret_ = NvRmDfsLogStarvationGetEntry( p_in->hRmDeviceHandle, p_in->EntryIndex, &p_out->pSampleIndex, &p_out->pClientId, &p_out->pClientTag, &p_out->pStarvationHint );
+
+ return err_;
+}
+
+// Dispatch stub for NvRmDfsLogActivityGetEntry.
+// Allocates kernel-side temporaries for each client-supplied list, calls the
+// RM implementation, then copies the lists back out with NvOsCopyOut.
+// Fixes over the generated form: (1) the client-controlled LogDomainsCount
+// is checked so the 32-bit size multiplications cannot wrap and hand
+// NvOsAlloc an undersized buffer; (2) a failed copy-out aborts immediately
+// instead of letting a later successful copy-out overwrite err_ with
+// NvSuccess and lose the failure.
+static NvError NvRmDfsLogActivityGetEntry_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsLogActivityGetEntry_in *p_in;
+ NvRmDfsLogActivityGetEntry_out *p_out;
+ NvU32 *pActiveCyclesList = NULL;
+ NvRmFreqKHz *pAveragesList = NULL;
+ NvRmFreqKHz *pFrequenciesList = NULL;
+
+ p_in = (NvRmDfsLogActivityGetEntry_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsLogActivityGetEntry_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsLogActivityGetEntry_params, out) - OFFSET(NvRmDfsLogActivityGetEntry_params, inout));
+
+ // Reject counts whose byte size would wrap the multiplications below
+ // (count * sizeof(elem)); both element sizes used in this stub are covered.
+ if( p_in->LogDomainsCount > (NvU32)-1 / sizeof( NvU32 ) ||
+ p_in->LogDomainsCount > (NvU32)-1 / sizeof( NvRmFreqKHz ) )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ if( p_in->LogDomainsCount && p_in->pActiveCyclesList )
+ {
+ pActiveCyclesList = (NvU32 *)NvOsAlloc( p_in->LogDomainsCount * sizeof( NvU32 ) );
+ if( !pActiveCyclesList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+ if( p_in->LogDomainsCount && p_in->pAveragesList )
+ {
+ pAveragesList = (NvRmFreqKHz *)NvOsAlloc( p_in->LogDomainsCount * sizeof( NvRmFreqKHz ) );
+ if( !pAveragesList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+ if( p_in->LogDomainsCount && p_in->pFrequenciesList )
+ {
+ pFrequenciesList = (NvRmFreqKHz *)NvOsAlloc( p_in->LogDomainsCount * sizeof( NvRmFreqKHz ) );
+ if( !pFrequenciesList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmDfsLogActivityGetEntry( p_in->hRmDeviceHandle, p_in->EntryIndex, p_in->LogDomainsCount, &p_out->pIntervalMs, &p_out->pLp2TimeMs, pActiveCyclesList, pAveragesList, pFrequenciesList );
+
+ // Copy each requested list back to the client; abort on the first failure
+ // so it is not masked by a later successful copy.
+ if(p_in->pActiveCyclesList && pActiveCyclesList)
+ {
+ err_ = NvOsCopyOut( p_in->pActiveCyclesList, pActiveCyclesList, p_in->LogDomainsCount * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ if(p_in->pAveragesList && pAveragesList)
+ {
+ err_ = NvOsCopyOut( p_in->pAveragesList, pAveragesList, p_in->LogDomainsCount * sizeof( NvRmFreqKHz ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ if(p_in->pFrequenciesList && pFrequenciesList)
+ {
+ err_ = NvOsCopyOut( p_in->pFrequenciesList, pFrequenciesList, p_in->LogDomainsCount * sizeof( NvRmFreqKHz ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pActiveCyclesList );
+ NvOsFree( pAveragesList );
+ NvOsFree( pFrequenciesList );
+ return err_;
+}
+
+// Dispatch stub for NvRmDfsLogGetMeanFrequencies.
+// Allocates a kernel-side temporary for the client's frequency list, calls
+// the RM implementation, then copies the list back out with NvOsCopyOut.
+// Fix over the generated form: the client-controlled LogMeanFreqListCount
+// is checked so the 32-bit size multiplication cannot wrap and hand
+// NvOsAlloc an undersized buffer.
+static NvError NvRmDfsLogGetMeanFrequencies_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsLogGetMeanFrequencies_in *p_in;
+ NvRmDfsLogGetMeanFrequencies_out *p_out;
+ NvRmFreqKHz *pLogMeanFreqList = NULL;
+
+ p_in = (NvRmDfsLogGetMeanFrequencies_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsLogGetMeanFrequencies_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsLogGetMeanFrequencies_params, out) - OFFSET(NvRmDfsLogGetMeanFrequencies_params, inout));
+
+ // Reject counts whose byte size would wrap the multiplication below.
+ if( p_in->LogMeanFreqListCount > (NvU32)-1 / sizeof( NvRmFreqKHz ) )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ if( p_in->LogMeanFreqListCount && p_in->pLogMeanFreqList )
+ {
+ pLogMeanFreqList = (NvRmFreqKHz *)NvOsAlloc( p_in->LogMeanFreqListCount * sizeof( NvRmFreqKHz ) );
+ if( !pLogMeanFreqList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmDfsLogGetMeanFrequencies( p_in->hRmDeviceHandle, p_in->LogMeanFreqListCount, pLogMeanFreqList, &p_out->pLogLp2TimeMs, &p_out->pLogLp2Entries );
+
+ if(p_in->pLogMeanFreqList && pLogMeanFreqList)
+ {
+ err_ = NvOsCopyOut( p_in->pLogMeanFreqList, pLogMeanFreqList, p_in->LogMeanFreqListCount * sizeof( NvRmFreqKHz ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pLogMeanFreqList );
+ return err_;
+}
+
+// Dispatch stub: forwards to NvRmDfsLogStart (void call; no out record).
+static NvError NvRmDfsLogStart_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsLogStart_in *p_in;
+
+ p_in = (NvRmDfsLogStart_in *)InBuffer;
+
+
+ NvRmDfsLogStart( p_in->hRmDeviceHandle );
+
+ return err_;
+}
+
+// Dispatch stub for NvRmDfsGetProfileData.
+// Allocates kernel-side temporaries for the client's sample/time lists,
+// calls the RM implementation, then copies the lists back out.
+// Fixes over the generated form: (1) the client-controlled DfsProfileCount
+// is checked so the 32-bit size multiplications cannot wrap and hand
+// NvOsAlloc an undersized buffer; (2) a failed copy-out aborts immediately
+// instead of letting the second copy-out overwrite err_ with NvSuccess.
+static NvError NvRmDfsGetProfileData_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+ NvError err_ = NvSuccess;
+ NvRmDfsGetProfileData_in *p_in;
+ NvRmDfsGetProfileData_out *p_out;
+ NvU32 *pSamplesNoList = NULL;
+ NvU32 *pProfileTimeUsList = NULL;
+
+ p_in = (NvRmDfsGetProfileData_in *)InBuffer;
+ // Advance past the inout section to the out record (see *_params layout).
+ p_out = (NvRmDfsGetProfileData_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsGetProfileData_params, out) - OFFSET(NvRmDfsGetProfileData_params, inout));
+
+ // Reject counts whose byte size would wrap the multiplications below.
+ if( p_in->DfsProfileCount > (NvU32)-1 / sizeof( NvU32 ) )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ if( p_in->DfsProfileCount && p_in->pSamplesNoList )
+ {
+ pSamplesNoList = (NvU32 *)NvOsAlloc( p_in->DfsProfileCount * sizeof( NvU32 ) );
+ if( !pSamplesNoList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+ if( p_in->DfsProfileCount && p_in->pProfileTimeUsList )
+ {
+ pProfileTimeUsList = (NvU32 *)NvOsAlloc( p_in->DfsProfileCount * sizeof( NvU32 ) );
+ if( !pProfileTimeUsList )
+ {
+ err_ = NvError_InsufficientMemory;
+ goto clean;
+ }
+ }
+
+ p_out->ret_ = NvRmDfsGetProfileData( p_in->hRmDeviceHandle, p_in->DfsProfileCount, pSamplesNoList, pProfileTimeUsList, &p_out->pDfsPeriodUs );
+
+ // Copy each requested list back to the client; abort on the first failure
+ // so it is not masked by a later successful copy.
+ if(p_in->pSamplesNoList && pSamplesNoList)
+ {
+ err_ = NvOsCopyOut( p_in->pSamplesNoList, pSamplesNoList, p_in->DfsProfileCount * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ goto clean;
+ }
+ }
+ if(p_in->pProfileTimeUsList && pProfileTimeUsList)
+ {
+ err_ = NvOsCopyOut( p_in->pProfileTimeUsList, pProfileTimeUsList, p_in->DfsProfileCount * sizeof( NvU32 ) );
+ if( err_ != NvSuccess )
+ {
+ err_ = NvError_BadParameter;
+ }
+ }
+clean:
+ NvOsFree( pSamplesNoList );
+ NvOsFree( pProfileTimeUsList );
+ return err_;
+}
+
+/* Dispatch stub for NvRmDfsSetAvHighCorner: forwards the three high-corner
+ * frequencies and records the NvError result in the out block. */
+static NvError NvRmDfsSetAvHighCorner_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsSetAvHighCorner_in *p_in = (NvRmDfsSetAvHighCorner_in *)InBuffer;
+    NvRmDfsSetAvHighCorner_out *p_out = (NvRmDfsSetAvHighCorner_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsSetAvHighCorner_params, out)
+        - OFFSET(NvRmDfsSetAvHighCorner_params, inout));
+
+    p_out->ret_ = NvRmDfsSetAvHighCorner( p_in->hRmDeviceHandle, p_in->DfsSystemHighKHz, p_in->DfsAvpHighKHz, p_in->DfsVpipeHighKHz );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsSetCpuEmcHighCorner: forwards the CPU and EMC
+ * high-corner frequencies and records the NvError result. */
+static NvError NvRmDfsSetCpuEmcHighCorner_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsSetCpuEmcHighCorner_in *p_in = (NvRmDfsSetCpuEmcHighCorner_in *)InBuffer;
+    NvRmDfsSetCpuEmcHighCorner_out *p_out = (NvRmDfsSetCpuEmcHighCorner_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsSetCpuEmcHighCorner_params, out)
+        - OFFSET(NvRmDfsSetCpuEmcHighCorner_params, inout));
+
+    p_out->ret_ = NvRmDfsSetCpuEmcHighCorner( p_in->hRmDeviceHandle, p_in->DfsCpuHighKHz, p_in->DfsEmcHighKHz );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsSetEmcEnvelope: forwards the EMC low/high corner
+ * envelope and records the NvError result. */
+static NvError NvRmDfsSetEmcEnvelope_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsSetEmcEnvelope_in *p_in = (NvRmDfsSetEmcEnvelope_in *)InBuffer;
+    NvRmDfsSetEmcEnvelope_out *p_out = (NvRmDfsSetEmcEnvelope_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsSetEmcEnvelope_params, out)
+        - OFFSET(NvRmDfsSetEmcEnvelope_params, inout));
+
+    p_out->ret_ = NvRmDfsSetEmcEnvelope( p_in->hRmDeviceHandle, p_in->DfsEmcLowCornerKHz, p_in->DfsEmcHighCornerKHz );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsSetCpuEnvelope: forwards the CPU low/high corner
+ * envelope and records the NvError result. */
+static NvError NvRmDfsSetCpuEnvelope_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsSetCpuEnvelope_in *p_in = (NvRmDfsSetCpuEnvelope_in *)InBuffer;
+    NvRmDfsSetCpuEnvelope_out *p_out = (NvRmDfsSetCpuEnvelope_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsSetCpuEnvelope_params, out)
+        - OFFSET(NvRmDfsSetCpuEnvelope_params, inout));
+
+    p_out->ret_ = NvRmDfsSetCpuEnvelope( p_in->hRmDeviceHandle, p_in->DfsCpuLowCornerKHz, p_in->DfsCpuHighCornerKHz );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsSetTarget.
+ * Copies the caller's target-frequency list into a kernel buffer and
+ * invokes the RM API. The untrusted element count is overflow-checked
+ * before sizing the allocation. */
+static NvError NvRmDfsSetTarget_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDfsSetTarget_in *p_in;
+    NvRmDfsSetTarget_out *p_out;
+    NvRmFreqKHz *pDfsTargetFreqList = NULL;
+
+    p_in = (NvRmDfsSetTarget_in *)InBuffer;
+    p_out = (NvRmDfsSetTarget_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsSetTarget_params, out) - OFFSET(NvRmDfsSetTarget_params, inout));
+
+    if( p_in->DfsFreqListCount && p_in->pDfsTargetFreqList )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->DfsFreqListCount > (NvU32)-1 / sizeof( NvRmFreqKHz ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        pDfsTargetFreqList = (NvRmFreqKHz *)NvOsAlloc( p_in->DfsFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( !pDfsTargetFreqList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( pDfsTargetFreqList, p_in->pDfsTargetFreqList, p_in->DfsFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmDfsSetTarget( p_in->hRmDeviceHandle, p_in->DfsFreqListCount, pDfsTargetFreqList );
+
+clean:
+    NvOsFree( pDfsTargetFreqList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDfsSetLowCorner.
+ * Copies the caller's low-corner frequency list into a kernel buffer and
+ * invokes the RM API. The untrusted element count is overflow-checked
+ * before sizing the allocation. */
+static NvError NvRmDfsSetLowCorner_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmDfsSetLowCorner_in *p_in;
+    NvRmDfsSetLowCorner_out *p_out;
+    NvRmFreqKHz *pDfsLowFreqList = NULL;
+
+    p_in = (NvRmDfsSetLowCorner_in *)InBuffer;
+    p_out = (NvRmDfsSetLowCorner_out *)((NvU8 *)OutBuffer + OFFSET(NvRmDfsSetLowCorner_params, out) - OFFSET(NvRmDfsSetLowCorner_params, inout));
+
+    if( p_in->DfsFreqListCount && p_in->pDfsLowFreqList )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->DfsFreqListCount > (NvU32)-1 / sizeof( NvRmFreqKHz ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        pDfsLowFreqList = (NvRmFreqKHz *)NvOsAlloc( p_in->DfsFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( !pDfsLowFreqList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( pDfsLowFreqList, p_in->pDfsLowFreqList, p_in->DfsFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmDfsSetLowCorner( p_in->hRmDeviceHandle, p_in->DfsFreqListCount, pDfsLowFreqList );
+
+clean:
+    NvOsFree( pDfsLowFreqList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmDfsSetState: forwards the requested DFS run state
+ * and records the NvError result. */
+static NvError NvRmDfsSetState_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsSetState_in *p_in = (NvRmDfsSetState_in *)InBuffer;
+    NvRmDfsSetState_out *p_out = (NvRmDfsSetState_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsSetState_params, out)
+        - OFFSET(NvRmDfsSetState_params, inout));
+
+    p_out->ret_ = NvRmDfsSetState( p_in->hRmDeviceHandle, p_in->NewDfsRunState );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsGetClockUtilization: queries utilization for one
+ * clock and returns it through the out block. */
+static NvError NvRmDfsGetClockUtilization_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsGetClockUtilization_in *p_in = (NvRmDfsGetClockUtilization_in *)InBuffer;
+    NvRmDfsGetClockUtilization_out *p_out = (NvRmDfsGetClockUtilization_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsGetClockUtilization_params, out)
+        - OFFSET(NvRmDfsGetClockUtilization_params, inout));
+
+    p_out->ret_ = NvRmDfsGetClockUtilization( p_in->hRmDeviceHandle, p_in->ClockId, &p_out->pClockUsage );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmDfsGetState: returns the current DFS run state in
+ * the out block's ret_ field. */
+static NvError NvRmDfsGetState_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmDfsGetState_in *p_in = (NvRmDfsGetState_in *)InBuffer;
+    NvRmDfsGetState_out *p_out = (NvRmDfsGetState_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmDfsGetState_params, out)
+        - OFFSET(NvRmDfsGetState_params, inout));
+
+    p_out->ret_ = NvRmDfsGetState( p_in->hRmDeviceHandle );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerActivityHint: forwards the activity-duration
+ * hint for a module/client pair and records the NvError result. */
+static NvError NvRmPowerActivityHint_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerActivityHint_in *p_in = (NvRmPowerActivityHint_in *)InBuffer;
+    NvRmPowerActivityHint_out *p_out = (NvRmPowerActivityHint_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerActivityHint_params, out)
+        - OFFSET(NvRmPowerActivityHint_params, inout));
+
+    p_out->ret_ = NvRmPowerActivityHint( p_in->hRmDeviceHandle, p_in->ModuleId, p_in->ClientId, p_in->ActivityDurationMs );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerStarvationHintMulti.
+ * Copies the caller's starvation-hint array into a kernel buffer and
+ * invokes the RM API. The untrusted hint count is overflow-checked before
+ * sizing the allocation. */
+static NvError NvRmPowerStarvationHintMulti_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmPowerStarvationHintMulti_in *p_in;
+    NvRmPowerStarvationHintMulti_out *p_out;
+    NvRmDfsStarvationHint *pMultiHint = NULL;
+
+    p_in = (NvRmPowerStarvationHintMulti_in *)InBuffer;
+    p_out = (NvRmPowerStarvationHintMulti_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPowerStarvationHintMulti_params, out) - OFFSET(NvRmPowerStarvationHintMulti_params, inout));
+
+    if( p_in->NumHints && p_in->pMultiHint )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->NumHints > (NvU32)-1 / sizeof( NvRmDfsStarvationHint ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        pMultiHint = (NvRmDfsStarvationHint *)NvOsAlloc( p_in->NumHints * sizeof( NvRmDfsStarvationHint ) );
+        if( !pMultiHint )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( pMultiHint, p_in->pMultiHint, p_in->NumHints * sizeof( NvRmDfsStarvationHint ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmPowerStarvationHintMulti( p_in->hRmDeviceHandle, p_in->ClientId, pMultiHint, p_in->NumHints );
+
+clean:
+    NvOsFree( pMultiHint );
+    return err_;
+}
+
+/* Dispatch stub for NvRmPowerStarvationHint: forwards a single starvation
+ * flag for a clock/client pair and records the NvError result. */
+static NvError NvRmPowerStarvationHint_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerStarvationHint_in *p_in = (NvRmPowerStarvationHint_in *)InBuffer;
+    NvRmPowerStarvationHint_out *p_out = (NvRmPowerStarvationHint_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerStarvationHint_params, out)
+        - OFFSET(NvRmPowerStarvationHint_params, inout));
+
+    p_out->ret_ = NvRmPowerStarvationHint( p_in->hRmDeviceHandle, p_in->ClockId, p_in->ClientId, p_in->Starving );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerBusyHintMulti.
+ * Copies the caller's busy-hint array into a kernel buffer and invokes the
+ * RM API. The untrusted hint count is overflow-checked before sizing the
+ * allocation. */
+static NvError NvRmPowerBusyHintMulti_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmPowerBusyHintMulti_in *p_in;
+    NvRmPowerBusyHintMulti_out *p_out;
+    NvRmDfsBusyHint *pMultiHint = NULL;
+
+    p_in = (NvRmPowerBusyHintMulti_in *)InBuffer;
+    p_out = (NvRmPowerBusyHintMulti_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPowerBusyHintMulti_params, out) - OFFSET(NvRmPowerBusyHintMulti_params, inout));
+
+    if( p_in->NumHints && p_in->pMultiHint )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->NumHints > (NvU32)-1 / sizeof( NvRmDfsBusyHint ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        pMultiHint = (NvRmDfsBusyHint *)NvOsAlloc( p_in->NumHints * sizeof( NvRmDfsBusyHint ) );
+        if( !pMultiHint )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( pMultiHint, p_in->pMultiHint, p_in->NumHints * sizeof( NvRmDfsBusyHint ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmPowerBusyHintMulti( p_in->hRmDeviceHandle, p_in->ClientId, pMultiHint, p_in->NumHints, p_in->Mode );
+
+clean:
+    NvOsFree( pMultiHint );
+    return err_;
+}
+
+/* Dispatch stub for NvRmPowerBusyHint: forwards a single boost request for
+ * a clock/client pair and records the NvError result. */
+static NvError NvRmPowerBusyHint_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerBusyHint_in *p_in = (NvRmPowerBusyHint_in *)InBuffer;
+    NvRmPowerBusyHint_out *p_out = (NvRmPowerBusyHint_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerBusyHint_params, out)
+        - OFFSET(NvRmPowerBusyHint_params, inout));
+
+    p_out->ret_ = NvRmPowerBusyHint( p_in->hRmDeviceHandle, p_in->ClockId, p_in->ClientId, p_in->BoostDurationMs, p_in->BoostKHz );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmListPowerAwareModules.
+ * pListSize is in/out: on entry it is the capacity of the caller's arrays,
+ * on exit the size reported by RM (which supports 0-capacity size queries).
+ * The untrusted capacity is overflow-checked before allocation, and the
+ * copy-out length is clamped to the allocated capacity so a larger
+ * reported size cannot overrun either kernel buffer. */
+static NvError NvRmListPowerAwareModules_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmListPowerAwareModules_in *p_in;
+    NvRmListPowerAwareModules_inout *p_inout;
+    NvRmListPowerAwareModules_inout inout;
+    NvRmModuleID *pIdList = NULL;
+    NvBool *pActiveList = NULL;
+    NvU32 AllocCount;
+    NvU32 CopyCount;
+
+    p_in = (NvRmListPowerAwareModules_in *)InBuffer;
+    p_inout = (NvRmListPowerAwareModules_inout *)((NvU8 *)InBuffer + OFFSET(NvRmListPowerAwareModules_params, inout));
+
+    (void)inout;
+    inout.pListSize = p_inout->pListSize;
+    AllocCount = p_inout->pListSize;
+    /* Reject capacities that would wrap either 32-bit allocation size. */
+    if( AllocCount > (NvU32)-1 / sizeof( NvRmModuleID ) ||
+        AllocCount > (NvU32)-1 / sizeof( NvBool ) )
+    {
+        err_ = NvError_BadParameter;
+        goto clean;
+    }
+    if( AllocCount && p_in->pIdList )
+    {
+        pIdList = (NvRmModuleID *)NvOsAlloc( AllocCount * sizeof( NvRmModuleID ) );
+        if( !pIdList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+    if( AllocCount && p_in->pActiveList )
+    {
+        pActiveList = (NvBool *)NvOsAlloc( AllocCount * sizeof( NvBool ) );
+        if( !pActiveList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+
+    NvRmListPowerAwareModules( p_in->hRmDeviceHandle, &inout.pListSize, pIdList, pActiveList );
+
+    /* Report the RM result unchanged (preserves size-query semantics), but
+     * never copy out more elements than were actually allocated above. */
+    p_inout = (NvRmListPowerAwareModules_inout *)OutBuffer;
+    p_inout->pListSize = inout.pListSize;
+    CopyCount = ( inout.pListSize < AllocCount ) ? inout.pListSize : AllocCount;
+    if(p_in->pIdList && pIdList)
+    {
+        err_ = NvOsCopyOut( p_in->pIdList, pIdList, CopyCount * sizeof( NvRmModuleID ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+    if(p_in->pActiveList && pActiveList)
+    {
+        err_ = NvOsCopyOut( p_in->pActiveList, pActiveList, CopyCount * sizeof( NvBool ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pIdList );
+    NvOsFree( pActiveList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmPowerVoltageControl.
+ * Copies the caller's preferred-voltage list into a kernel buffer and
+ * invokes the RM API. The untrusted element count is overflow-checked
+ * before sizing the allocation. */
+static NvError NvRmPowerVoltageControl_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmPowerVoltageControl_in *p_in;
+    NvRmPowerVoltageControl_out *p_out;
+    NvRmMilliVolts *PrefVoltageList = NULL;
+
+    p_in = (NvRmPowerVoltageControl_in *)InBuffer;
+    p_out = (NvRmPowerVoltageControl_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPowerVoltageControl_params, out) - OFFSET(NvRmPowerVoltageControl_params, inout));
+
+    if( p_in->PrefVoltageListCount && p_in->PrefVoltageList )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->PrefVoltageListCount > (NvU32)-1 / sizeof( NvRmMilliVolts ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        PrefVoltageList = (NvRmMilliVolts *)NvOsAlloc( p_in->PrefVoltageListCount * sizeof( NvRmMilliVolts ) );
+        if( !PrefVoltageList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( PrefVoltageList, p_in->PrefVoltageList, p_in->PrefVoltageListCount * sizeof( NvRmMilliVolts ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmPowerVoltageControl( p_in->hRmDeviceHandle, p_in->ModuleId, p_in->ClientId, p_in->MinVolts, p_in->MaxVolts, PrefVoltageList, p_in->PrefVoltageListCount, &p_out->CurrentVolts );
+
+clean:
+    NvOsFree( PrefVoltageList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmPowerModuleClockControl: forwards the clock
+ * enable/disable request and records the NvError result. */
+static NvError NvRmPowerModuleClockControl_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerModuleClockControl_in *p_in = (NvRmPowerModuleClockControl_in *)InBuffer;
+    NvRmPowerModuleClockControl_out *p_out = (NvRmPowerModuleClockControl_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerModuleClockControl_params, out)
+        - OFFSET(NvRmPowerModuleClockControl_params, inout));
+
+    p_out->ret_ = NvRmPowerModuleClockControl( p_in->hRmDeviceHandle, p_in->ModuleId, p_in->ClientId, p_in->Enable );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerModuleClockConfig.
+ * Copies the caller's preferred-frequency list into a kernel buffer and
+ * invokes the RM API. The untrusted element count is overflow-checked
+ * before sizing the allocation. */
+static NvError NvRmPowerModuleClockConfig_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmPowerModuleClockConfig_in *p_in;
+    NvRmPowerModuleClockConfig_out *p_out;
+    NvRmFreqKHz *PrefFreqList = NULL;
+
+    p_in = (NvRmPowerModuleClockConfig_in *)InBuffer;
+    p_out = (NvRmPowerModuleClockConfig_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPowerModuleClockConfig_params, out) - OFFSET(NvRmPowerModuleClockConfig_params, inout));
+
+    if( p_in->PrefFreqListCount && p_in->PrefFreqList )
+    {
+        /* Reject counts that would wrap the 32-bit allocation size. */
+        if( p_in->PrefFreqListCount > (NvU32)-1 / sizeof( NvRmFreqKHz ) )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+        PrefFreqList = (NvRmFreqKHz *)NvOsAlloc( p_in->PrefFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( !PrefFreqList )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* The enclosing condition already guarantees a non-NULL user
+         * pointer, so the copy-in is unconditional here. */
+        err_ = NvOsCopyIn( PrefFreqList, p_in->PrefFreqList, p_in->PrefFreqListCount * sizeof( NvRmFreqKHz ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmPowerModuleClockConfig( p_in->hRmDeviceHandle, p_in->ModuleId, p_in->ClientId, p_in->MinFreq, p_in->MaxFreq, PrefFreqList, p_in->PrefFreqListCount, &p_out->CurrentFreq, p_in->flags );
+
+clean:
+    NvOsFree( PrefFreqList );
+    return err_;
+}
+
+/* Dispatch stub for NvRmPowerModuleGetMaxFrequency: returns the module's
+ * maximum frequency in the out block's ret_ field. */
+static NvError NvRmPowerModuleGetMaxFrequency_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerModuleGetMaxFrequency_in *p_in = (NvRmPowerModuleGetMaxFrequency_in *)InBuffer;
+    NvRmPowerModuleGetMaxFrequency_out *p_out = (NvRmPowerModuleGetMaxFrequency_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerModuleGetMaxFrequency_params, out)
+        - OFFSET(NvRmPowerModuleGetMaxFrequency_params, inout));
+
+    p_out->ret_ = NvRmPowerModuleGetMaxFrequency( p_in->hRmDeviceHandle, p_in->ModuleId );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerGetPrimaryFrequency: returns the primary
+ * oscillator frequency in the out block's ret_ field. */
+static NvError NvRmPowerGetPrimaryFrequency_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerGetPrimaryFrequency_in *p_in = (NvRmPowerGetPrimaryFrequency_in *)InBuffer;
+    NvRmPowerGetPrimaryFrequency_out *p_out = (NvRmPowerGetPrimaryFrequency_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerGetPrimaryFrequency_params, out)
+        - OFFSET(NvRmPowerGetPrimaryFrequency_params, inout));
+
+    p_out->ret_ = NvRmPowerGetPrimaryFrequency( p_in->hRmDeviceHandle );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerGetState: queries the RM power state and
+ * returns it through the out block. */
+static NvError NvRmPowerGetState_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerGetState_in *p_in = (NvRmPowerGetState_in *)InBuffer;
+    NvRmPowerGetState_out *p_out = (NvRmPowerGetState_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerGetState_params, out)
+        - OFFSET(NvRmPowerGetState_params, inout));
+
+    p_out->ret_ = NvRmPowerGetState( p_in->hRmDeviceHandle, &p_out->pState );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerEventNotify: input-only notification with no
+ * result value, so it always reports marshalling success. */
+static NvError NvRmPowerEventNotify_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerEventNotify_in *p_in = (NvRmPowerEventNotify_in *)InBuffer;
+
+    NvRmPowerEventNotify( p_in->hRmDeviceHandle, p_in->Event );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerGetEvent: fetches the pending power event for
+ * a client and returns it through the out block. */
+static NvError NvRmPowerGetEvent_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerGetEvent_in *p_in = (NvRmPowerGetEvent_in *)InBuffer;
+    NvRmPowerGetEvent_out *p_out = (NvRmPowerGetEvent_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPowerGetEvent_params, out)
+        - OFFSET(NvRmPowerGetEvent_params, inout));
+
+    p_out->ret_ = NvRmPowerGetEvent( p_in->hRmDeviceHandle, p_in->ClientId, &p_out->pEvent );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerUnRegister: input-only call with no result
+ * value, so it always reports marshalling success. */
+static NvError NvRmPowerUnRegister_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPowerUnRegister_in *p_in = (NvRmPowerUnRegister_in *)InBuffer;
+
+    NvRmPowerUnRegister( p_in->hRmDeviceHandle, p_in->ClientId );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPowerRegister.
+ * Unmarshals the caller's event semaphore token (if any) into a local
+ * kernel handle, registers the client with the power manager, and returns
+ * the assigned ClientId through the inout block. */
+static NvError NvRmPowerRegister_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmPowerRegister_in *p_in;
+    NvRmPowerRegister_inout *p_inout;
+    NvRmPowerRegister_out *p_out;
+    NvRmPowerRegister_inout inout;
+    NvOsSemaphoreHandle hEventSemaphore = NULL;
+
+    p_in = (NvRmPowerRegister_in *)InBuffer;
+    p_inout = (NvRmPowerRegister_inout *)((NvU8 *)InBuffer + OFFSET(NvRmPowerRegister_params, inout));
+    p_out = (NvRmPowerRegister_out *)((NvU8 *)OutBuffer + OFFSET(NvRmPowerRegister_params, out) - OFFSET(NvRmPowerRegister_params, inout));
+
+    (void)inout;
+    if( p_in->hEventSemaphore )
+    {
+        /* Convert the client-side semaphore token into a kernel handle. */
+        err_ = NvOsSemaphoreUnmarshal( p_in->hEventSemaphore, &hEventSemaphore );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+    inout.pClientId = p_inout->pClientId;
+
+    p_out->ret_ = NvRmPowerRegister( p_in->hRmDeviceHandle, hEventSemaphore, &inout.pClientId );
+
+
+    /* The inout block is written back at the start of the out buffer. */
+    p_inout = (NvRmPowerRegister_inout *)OutBuffer;
+    p_inout->pClientId = inout.pClientId;
+clean:
+    /* NOTE(review): the local semaphore reference is destroyed even on
+     * success — presumably NvRmPowerRegister takes its own reference;
+     * confirm against the RM implementation. */
+    NvOsSemaphoreDestroy( hEventSemaphore );
+    return err_;
+}
+
+/* Top-level marshalling entry point for the nvrm_power package: routes an
+ * IDL-assigned function ordinal to the matching per-function dispatch stub.
+ * The ordinal↔function mapping is part of the user/kernel ABI and must not
+ * be changed. Unknown ordinals are rejected with NvError_BadParameter. */
+NvError nvrm_power_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_power_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+
+    switch( function ) {
+    case 34:
+        err_ = NvRmKernelPowerResume_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 33:
+        err_ = NvRmKernelPowerSuspend_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 32:
+        err_ = NvRmDfsSetLowVoltageThreshold_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 31:
+        err_ = NvRmDfsGetLowVoltageThreshold_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 30:
+        err_ = NvRmDfsLogBusyGetEntry_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 29:
+        err_ = NvRmDfsLogStarvationGetEntry_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 28:
+        err_ = NvRmDfsLogActivityGetEntry_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 27:
+        err_ = NvRmDfsLogGetMeanFrequencies_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 26:
+        err_ = NvRmDfsLogStart_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 25:
+        err_ = NvRmDfsGetProfileData_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 24:
+        err_ = NvRmDfsSetAvHighCorner_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 23:
+        err_ = NvRmDfsSetCpuEmcHighCorner_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 22:
+        err_ = NvRmDfsSetEmcEnvelope_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 21:
+        err_ = NvRmDfsSetCpuEnvelope_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 20:
+        err_ = NvRmDfsSetTarget_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 19:
+        err_ = NvRmDfsSetLowCorner_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 18:
+        err_ = NvRmDfsSetState_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 17:
+        err_ = NvRmDfsGetClockUtilization_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 16:
+        err_ = NvRmDfsGetState_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 15:
+        err_ = NvRmPowerActivityHint_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 14:
+        err_ = NvRmPowerStarvationHintMulti_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 13:
+        err_ = NvRmPowerStarvationHint_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 12:
+        err_ = NvRmPowerBusyHintMulti_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 11:
+        err_ = NvRmPowerBusyHint_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 10:
+        err_ = NvRmListPowerAwareModules_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 9:
+        err_ = NvRmPowerVoltageControl_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 8:
+        err_ = NvRmPowerModuleClockControl_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 7:
+        err_ = NvRmPowerModuleClockConfig_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 6:
+        err_ = NvRmPowerModuleGetMaxFrequency_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 5:
+        err_ = NvRmPowerGetPrimaryFrequency_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 4:
+        err_ = NvRmPowerGetState_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 3:
+        err_ = NvRmPowerEventNotify_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 2:
+        err_ = NvRmPowerGetEvent_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 1:
+        err_ = NvRmPowerUnRegister_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    case 0:
+        err_ = NvRmPowerRegister_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+        break;
+    default:
+        err_ = NvError_BadParameter;
+        break;
+    }
+
+    return err_;
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pwm_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pwm_dispatch.c
new file mode 100644
index 000000000000..44d1b5e4a3b3
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_pwm_dispatch.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_pwm.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/* Marshalled parameter blocks for NvRmPwmConfig.
+ * in: PWM handle, output pin, mode, duty cycle and requested rate;
+ * out: NvError result plus the frequency/period actually programmed.
+ * package_/function_ carry the IDL routing ordinals. */
+typedef struct NvRmPwmConfig_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmPwmHandle hPwm;
+    NvRmPwmOutputId OutputId;
+    NvRmPwmMode Mode;
+    NvU32 DutyCycle;
+    NvU32 RequestedFreqHzOrPeriod;
+} NV_ALIGN(4) NvRmPwmConfig_in;
+
+typedef struct NvRmPwmConfig_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmPwmConfig_inout;
+
+typedef struct NvRmPwmConfig_out_t
+{
+    NvError ret_;
+    NvU32 pCurrentFreqHzOrPeriod;
+} NV_ALIGN(4) NvRmPwmConfig_out;
+
+/* _params aggregates in/inout/out; OFFSET() arithmetic in the stubs relies
+ * on this member order. */
+typedef struct NvRmPwmConfig_params_t
+{
+    NvRmPwmConfig_in in;
+    NvRmPwmConfig_inout inout;
+    NvRmPwmConfig_out out;
+} NvRmPwmConfig_params;
+
+/* Marshalled parameter blocks for NvRmPwmClose: handle in, nothing out. */
+typedef struct NvRmPwmClose_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmPwmHandle hPwm;
+} NV_ALIGN(4) NvRmPwmClose_in;
+
+typedef struct NvRmPwmClose_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmPwmClose_inout;
+
+typedef struct NvRmPwmClose_out_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmPwmClose_out;
+
+typedef struct NvRmPwmClose_params_t
+{
+    NvRmPwmClose_in in;
+    NvRmPwmClose_inout inout;
+    NvRmPwmClose_out out;
+} NvRmPwmClose_params;
+
+/* Marshalled parameter blocks for NvRmPwmOpen: device handle in, NvError
+ * result and new PWM handle out. */
+typedef struct NvRmPwmOpen_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDeviceHandle hDevice;
+} NV_ALIGN(4) NvRmPwmOpen_in;
+
+typedef struct NvRmPwmOpen_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmPwmOpen_inout;
+
+typedef struct NvRmPwmOpen_out_t
+{
+    NvError ret_;
+    NvRmPwmHandle phPwm;
+} NV_ALIGN(4) NvRmPwmOpen_out;
+
+typedef struct NvRmPwmOpen_params_t
+{
+    NvRmPwmOpen_in in;
+    NvRmPwmOpen_inout inout;
+    NvRmPwmOpen_out out;
+} NvRmPwmOpen_params;
+
+/* Dispatch stub for NvRmPwmConfig: forwards the PWM configuration request
+ * and returns the programmed frequency/period through the out block. */
+static NvError NvRmPwmConfig_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPwmConfig_in *p_in = (NvRmPwmConfig_in *)InBuffer;
+    NvRmPwmConfig_out *p_out = (NvRmPwmConfig_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPwmConfig_params, out)
+        - OFFSET(NvRmPwmConfig_params, inout));
+
+    p_out->ret_ = NvRmPwmConfig( p_in->hPwm, p_in->OutputId, p_in->Mode, p_in->DutyCycle, p_in->RequestedFreqHzOrPeriod, &p_out->pCurrentFreqHzOrPeriod );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPwmClose: input-only call with no result value,
+ * so it always reports marshalling success. */
+static NvError NvRmPwmClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPwmClose_in *p_in = (NvRmPwmClose_in *)InBuffer;
+
+    NvRmPwmClose( p_in->hPwm );
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmPwmOpen: opens a PWM handle for the device and
+ * returns it through the out block. */
+static NvError NvRmPwmOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvRmPwmOpen_in *p_in = (NvRmPwmOpen_in *)InBuffer;
+    NvRmPwmOpen_out *p_out = (NvRmPwmOpen_out *)((NvU8 *)OutBuffer
+        + OFFSET(NvRmPwmOpen_params, out)
+        - OFFSET(NvRmPwmOpen_params, inout));
+
+    p_out->ret_ = NvRmPwmOpen( p_in->hDevice, &p_out->phPwm );
+    return NvSuccess;
+}
+
+NvError nvrm_pwm_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+/* Top-level marshalling entry for the PWM package: routes the IDL function
+ * ordinal to the matching dispatch stub. Unknown ordinals are rejected. */
+NvError nvrm_pwm_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    switch( function ) {
+    case 0:
+        return NvRmPwmOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 1:
+        return NvRmPwmClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 2:
+        return NvRmPwmConfig_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    default:
+        return NvError_BadParameter;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/dispatch/nvrm_spi_dispatch.c b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_spi_dispatch.c
new file mode 100644
index 000000000000..8581343518f5
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/dispatch/nvrm_spi_dispatch.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define NV_IDL_IS_DISPATCH
+
+#include "nvcommon.h"
+#include "nvos.h"
+#include "nvassert.h"
+#include "nvreftrack.h"
+#include "nvidlcmd.h"
+#include "nvrm_spi.h"
+
+#define OFFSET( s, e ) (NvU32)(void *)(&(((s*)0)->e))
+
+
+/* Marshalling records for the nvrm_spi IDL interface.  Each API call has
+ * three wire-format sections -- in, inout, out -- laid out back-to-back
+ * in a *_params struct; the dispatch stubs use OFFSET() arithmetic on
+ * that layout to locate the out section from the inout pointer they are
+ * handed.  package_/function_ identify the call on the wire.  Fields of
+ * pointer type hold CLIENT-side addresses and must only be accessed via
+ * NvOsCopyIn/NvOsCopyOut bounce buffers.  Field names (including the
+ * misspelled pBytesTransfererd) are part of the generated wire format
+ * and must not be renamed here. */
+typedef struct NvRmSpiSetSignalMode_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmSpiHandle hRmSpi;
+    NvU32 ChipSelectId;
+    NvU32 SpiSignalMode;
+} NV_ALIGN(4) NvRmSpiSetSignalMode_in;
+
+typedef struct NvRmSpiSetSignalMode_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiSetSignalMode_inout;
+
+typedef struct NvRmSpiSetSignalMode_out_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiSetSignalMode_out;
+
+typedef struct NvRmSpiSetSignalMode_params_t
+{
+    NvRmSpiSetSignalMode_in in;
+    NvRmSpiSetSignalMode_inout inout;
+    NvRmSpiSetSignalMode_out out;
+} NvRmSpiSetSignalMode_params;
+
+/* NvRmSpiGetTransactionData: pReadBuffer is a client pointer that
+ * receives up to BytesRequested bytes from the completed transfer. */
+typedef struct NvRmSpiGetTransactionData_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmSpiHandle hRmSpi;
+    NvU8 * pReadBuffer;
+    NvU32 BytesRequested;
+    NvU32 WaitTimeout;
+} NV_ALIGN(4) NvRmSpiGetTransactionData_in;
+
+typedef struct NvRmSpiGetTransactionData_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiGetTransactionData_inout;
+
+typedef struct NvRmSpiGetTransactionData_out_t
+{
+    NvError ret_;
+    NvU32 pBytesTransfererd;
+} NV_ALIGN(4) NvRmSpiGetTransactionData_out;
+
+typedef struct NvRmSpiGetTransactionData_params_t
+{
+    NvRmSpiGetTransactionData_in in;
+    NvRmSpiGetTransactionData_inout inout;
+    NvRmSpiGetTransactionData_out out;
+} NvRmSpiGetTransactionData_params;
+
+/* NvRmSpiStartTransaction: asynchronous start; pWriteBuffer is a client
+ * pointer supplying the outgoing payload. */
+typedef struct NvRmSpiStartTransaction_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmSpiHandle hRmSpi;
+    NvU32 ChipSelectId;
+    NvU32 ClockSpeedInKHz;
+    NvBool IsReadTransfer;
+    NvU8 * pWriteBuffer;
+    NvU32 BytesRequested;
+    NvU32 PacketSizeInBits;
+} NV_ALIGN(4) NvRmSpiStartTransaction_in;
+
+typedef struct NvRmSpiStartTransaction_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiStartTransaction_inout;
+
+typedef struct NvRmSpiStartTransaction_out_t
+{
+    NvError ret_;
+} NV_ALIGN(4) NvRmSpiStartTransaction_out;
+
+typedef struct NvRmSpiStartTransaction_params_t
+{
+    NvRmSpiStartTransaction_in in;
+    NvRmSpiStartTransaction_inout inout;
+    NvRmSpiStartTransaction_out out;
+} NvRmSpiStartTransaction_params;
+
+/* NvRmSpiTransaction: synchronous transfer; both buffer fields are
+ * client pointers, either of which may be NULL for one-way traffic. */
+typedef struct NvRmSpiTransaction_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmSpiHandle hRmSpi;
+    NvU32 SpiPinMap;
+    NvU32 ChipSelectId;
+    NvU32 ClockSpeedInKHz;
+    NvU8 * pReadBuffer;
+    NvU8 * pWriteBuffer;
+    NvU32 BytesRequested;
+    NvU32 PacketSizeInBits;
+} NV_ALIGN(4) NvRmSpiTransaction_in;
+
+typedef struct NvRmSpiTransaction_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiTransaction_inout;
+
+typedef struct NvRmSpiTransaction_out_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiTransaction_out;
+
+typedef struct NvRmSpiTransaction_params_t
+{
+    NvRmSpiTransaction_in in;
+    NvRmSpiTransaction_inout inout;
+    NvRmSpiTransaction_out out;
+} NvRmSpiTransaction_params;
+
+typedef struct NvRmSpiClose_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmSpiHandle hRmSpi;
+} NV_ALIGN(4) NvRmSpiClose_in;
+
+typedef struct NvRmSpiClose_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiClose_inout;
+
+typedef struct NvRmSpiClose_out_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiClose_out;
+
+typedef struct NvRmSpiClose_params_t
+{
+    NvRmSpiClose_in in;
+    NvRmSpiClose_inout inout;
+    NvRmSpiClose_out out;
+} NvRmSpiClose_params;
+
+/* NvRmSpiOpen: returns the controller handle in phRmSpi on success. */
+typedef struct NvRmSpiOpen_in_t
+{
+    NvU32 package_;
+    NvU32 function_;
+    NvRmDeviceHandle hRmDevice;
+    NvU32 IoModule;
+    NvU32 InstanceId;
+    NvBool IsMasterMode;
+} NV_ALIGN(4) NvRmSpiOpen_in;
+
+typedef struct NvRmSpiOpen_inout_t
+{
+    NvU32 dummy_;
+} NV_ALIGN(4) NvRmSpiOpen_inout;
+
+typedef struct NvRmSpiOpen_out_t
+{
+    NvError ret_;
+    NvRmSpiHandle phRmSpi;
+} NV_ALIGN(4) NvRmSpiOpen_out;
+
+typedef struct NvRmSpiOpen_params_t
+{
+    NvRmSpiOpen_in in;
+    NvRmSpiOpen_inout inout;
+    NvRmSpiOpen_out out;
+} NvRmSpiOpen_params;
+
+static NvError NvRmSpiSetSignalMode_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* All three arguments are plain values in the input record; the RM
+     * call returns void, so this stub always reports success. */
+    NvRmSpiSetSignalMode_in *args = (NvRmSpiSetSignalMode_in *)InBuffer;
+
+    NvRmSpiSetSignalMode( args->hRmSpi, args->ChipSelectId, args->SpiSignalMode );
+
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmSpiGetTransactionData: collects the results of a
+ * previously started transfer.  Read data is bounced through a kernel
+ * allocation and copied out to the client's pReadBuffer; the RM API's
+ * NvError goes to the client in ret_, while this function's own return
+ * value reports only marshalling failures. */
+static NvError NvRmSpiGetTransactionData_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmSpiGetTransactionData_in *p_in;
+    NvRmSpiGetTransactionData_out *p_out;
+    NvU8 *pReadBuffer = NULL;
+
+    p_in = (NvRmSpiGetTransactionData_in *)InBuffer;
+    /* OutBuffer points at the inout section; locate the out section. */
+    p_out = (NvRmSpiGetTransactionData_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSpiGetTransactionData_params, out) - OFFSET(NvRmSpiGetTransactionData_params, inout));
+
+    /* Only allocate a bounce buffer when the client actually wants read
+     * data back; otherwise the RM API is called with a NULL buffer. */
+    if( p_in->BytesRequested && p_in->pReadBuffer )
+    {
+        pReadBuffer = (NvU8 *)NvOsAlloc( p_in->BytesRequested * sizeof( NvU8 ) );
+        if( !pReadBuffer )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmSpiGetTransactionData( p_in->hRmSpi, pReadBuffer, p_in->BytesRequested, &p_out->pBytesTransfererd, p_in->WaitTimeout );
+
+    /* Copy the received bytes back to the client's buffer.  Note this
+     * copies BytesRequested bytes regardless of pBytesTransfererd. */
+    if(p_in->pReadBuffer && pReadBuffer)
+    {
+        err_ = NvOsCopyOut( p_in->pReadBuffer, pReadBuffer, p_in->BytesRequested * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pReadBuffer );
+    return err_;
+}
+
+/* Dispatch stub for NvRmSpiStartTransaction (asynchronous start).
+ *
+ * pWriteBuffer in the request holds a client address; the payload is
+ * bounced into a kernel allocation with NvOsCopyIn before the RM API
+ * sees it.  The API's NvError is marshalled into ret_; this function's
+ * own return value reports only marshalling/allocation failures. */
+static NvError NvRmSpiStartTransaction_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmSpiStartTransaction_in *p_in;
+    NvRmSpiStartTransaction_out *p_out;
+    NvU8 *pWriteBuffer = NULL;
+
+    p_in = (NvRmSpiStartTransaction_in *)InBuffer;
+    /* OutBuffer points at the inout section; locate the out section. */
+    p_out = (NvRmSpiStartTransaction_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSpiStartTransaction_params, out) - OFFSET(NvRmSpiStartTransaction_params, inout));
+
+    if( p_in->BytesRequested && p_in->pWriteBuffer )
+    {
+        pWriteBuffer = (NvU8 *)NvOsAlloc( p_in->BytesRequested * sizeof( NvU8 ) );
+        if( !pWriteBuffer )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pWriteBuffer is guaranteed non-NULL by the enclosing
+         * condition, so copy in unconditionally (the generated code
+         * re-tested it redundantly). */
+        err_ = NvOsCopyIn( pWriteBuffer, p_in->pWriteBuffer, p_in->BytesRequested * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    p_out->ret_ = NvRmSpiStartTransaction( p_in->hRmSpi, p_in->ChipSelectId, p_in->ClockSpeedInKHz, p_in->IsReadTransfer, pWriteBuffer, p_in->BytesRequested, p_in->PacketSizeInBits );
+
+clean:
+    /* NvOsFree(NULL) is expected to be a no-op, matching free(). */
+    NvOsFree( pWriteBuffer );
+    return err_;
+}
+
+/* Dispatch stub for NvRmSpiTransaction (synchronous transfer).
+ *
+ * Client read/write pointers are never handed to the RM directly: the
+ * write payload is copied into a kernel bounce buffer before the
+ * transfer, and read data is copied back out afterwards.  The RM API
+ * itself returns void, so this stub reports only marshalling and
+ * allocation errors. */
+static NvError NvRmSpiTransaction_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmSpiTransaction_in *p_in;
+    NvU8 *pReadBuffer = NULL;
+    NvU8 *pWriteBuffer = NULL;
+
+    p_in = (NvRmSpiTransaction_in *)InBuffer;
+
+    /* Either buffer may be absent (NULL / zero length) for one-way
+     * traffic; the RM API is then called with a NULL pointer. */
+    if( p_in->BytesRequested && p_in->pReadBuffer )
+    {
+        pReadBuffer = (NvU8 *)NvOsAlloc( p_in->BytesRequested * sizeof( NvU8 ) );
+        if( !pReadBuffer )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+    }
+    if( p_in->BytesRequested && p_in->pWriteBuffer )
+    {
+        pWriteBuffer = (NvU8 *)NvOsAlloc( p_in->BytesRequested * sizeof( NvU8 ) );
+        if( !pWriteBuffer )
+        {
+            err_ = NvError_InsufficientMemory;
+            goto clean;
+        }
+        /* p_in->pWriteBuffer is guaranteed non-NULL by the enclosing
+         * condition, so copy in unconditionally (the generated code
+         * re-tested it redundantly). */
+        err_ = NvOsCopyIn( pWriteBuffer, p_in->pWriteBuffer, p_in->BytesRequested * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+            goto clean;
+        }
+    }
+
+    NvRmSpiTransaction( p_in->hRmSpi, p_in->SpiPinMap, p_in->ChipSelectId, p_in->ClockSpeedInKHz, pReadBuffer, pWriteBuffer, p_in->BytesRequested, p_in->PacketSizeInBits );
+
+    if(p_in->pReadBuffer && pReadBuffer)
+    {
+        err_ = NvOsCopyOut( p_in->pReadBuffer, pReadBuffer, p_in->BytesRequested * sizeof( NvU8 ) );
+        if( err_ != NvSuccess )
+        {
+            err_ = NvError_BadParameter;
+        }
+    }
+clean:
+    NvOsFree( pReadBuffer );
+    NvOsFree( pWriteBuffer );
+    return err_;
+}
+
+static NvError NvRmSpiClose_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Dispatch stub for NvRmSpiClose: releases the SPI handle.  The RM
+     * call returns nothing, so the stub always reports success. */
+    NvRmSpiClose_in *args = (NvRmSpiClose_in *)InBuffer;
+
+    NvRmSpiClose( args->hRmSpi );
+
+    return NvSuccess;
+}
+
+/* Dispatch stub for NvRmSpiOpen: unmarshals the open parameters, calls
+ * the RM API, and stores both the NvError result and the new SPI handle
+ * in the reply buffer for the client. */
+static NvError NvRmSpiOpen_dispatch_( void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    NvError err_ = NvSuccess;
+    NvRmSpiOpen_in *p_in;
+    NvRmSpiOpen_out *p_out;
+
+    p_in = (NvRmSpiOpen_in *)InBuffer;
+    /* OutBuffer points at the inout section of the wire format; step
+     * forward to the out section using the params-struct layout. */
+    p_out = (NvRmSpiOpen_out *)((NvU8 *)OutBuffer + OFFSET(NvRmSpiOpen_params, out) - OFFSET(NvRmSpiOpen_params, inout));
+
+
+    p_out->ret_ = NvRmSpiOpen( p_in->hRmDevice, p_in->IoModule, p_in->InstanceId, p_in->IsMasterMode, &p_out->phRmSpi );
+
+    return err_;
+}
+
+NvError nvrm_spi_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx );
+NvError nvrm_spi_Dispatch( NvU32 function, void *InBuffer, NvU32 InSize, void *OutBuffer, NvU32 OutSize, NvDispatchCtx* Ctx )
+{
+    /* Route a marshalled nvrm_spi call to the stub for its IDL function
+     * code.  Unknown codes are rejected with NvError_BadParameter. */
+    switch( function ) {
+    case 0:
+        return NvRmSpiOpen_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 1:
+        return NvRmSpiClose_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 2:
+        return NvRmSpiTransaction_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 3:
+        return NvRmSpiStartTransaction_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 4:
+        return NvRmSpiGetTransactionData_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    case 5:
+        return NvRmSpiSetSignalMode_dispatch_( InBuffer, InSize, OutBuffer, OutSize, Ctx );
+    default:
+        return NvError_BadParameter;
+    }
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/Makefile b/arch/arm/mach-tegra/nvrm/io/ap15/Makefile
new file mode 100644
index 000000000000..231383d40623
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/Makefile
@@ -0,0 +1,24 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += ap15rm_analog.o
+obj-y += rm_dma_hw_private.o
+obj-y += ap15rm_dma_hw_private.o
+obj-y += ap15rm_slink_hw_private.o
+obj-y += rm_common_slink_hw_private.o
+obj-y += ap15rm_i2c.o
+obj-y += ap15rm_pwm.o
+obj-y += ap15rm_gpio_vi.o
+obj-y += nvrm_dma.o
+obj-y += nvrm_gpio.o
+obj-y += nvrm_gpio_private.o
+obj-y += nvrm_gpio_stub_helper.o
+obj-y += ap15rm_dma_intr.o
+obj-y += rm_spi_hw_private.o
+obj-y += rm_spi_slink.o
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_analog.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_analog.c
new file mode 100644
index 000000000000..3c6bc3e7c523
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_analog.c
@@ -0,0 +1,2175 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_structure.h"
+#include "nvrm_analog.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_power.h"
+#include "ap16/arapb_misc.h"
+#include "ap15/arclk_rst.h"
+#include "ap15/arfuse.h"
+#include "nvodm_query.h"
+#include "nvodm_pmu.h"
+#include "nvrm_clocks.h"
+#include "nvrm_module.h"
+#include "ap20/arusb.h"
+
+/**
+ * Per-oscillator-frequency delay parameters for the USB UTMI PLL
+ * start-up sequence (see s_UsbPllDelayParams below for the values).
+ */
+typedef struct UsbPllDelayParamsRec
+{
+    // PLL-U enable delay count
+    NvU8 EnableDelayCount;
+    // PLL-U stable count
+    NvU8 StableCount;
+    // PLL-U active delay count
+    NvU8 ActiveDelayCount;
+    // PLL-U crystal frequency count
+    NvU8 XtalFreqCount;
+} UsbPllDelayParams;
+
+/*
+ * Set of oscillator frequencies supported.  The enum values index the
+ * frequency-keyed tables in this file.
+ */
+typedef enum
+{
+    NvRmClocksOscFreq_13_MHz = 0x0,
+    NvRmClocksOscFreq_19_2_MHz,
+    NvRmClocksOscFreq_12_MHz,
+    NvRmClocksOscFreq_26_MHz,
+    NvRmClocksOscFreq_Num, // dummy to get number of frequencies
+    NvRmClocksOscFreq_Force32 = 0x7fffffff
+} NvRmClocksOscFreq;
+
+// Possible oscillator frequencies in kHz, indexed by NvRmClocksOscFreq.
+// NOTE(review): this table is a file-scope global without static or
+// const qualifiers -- confirm whether other translation units link
+// against it before tightening the linkage.
+NvRmFreqKHz s_RmOscFrequecy [NvRmClocksOscFreq_Num] =
+{
+    13000, // 13 Mega Hertz
+    19200,// 19.2 Mega Hertz
+    12000,// 12 Mega Hertz
+    26000 // 26 Mega Hertz
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// USB PLL CONFIGURATION & PARAMETERS: refer to the arapb_misc_utmip.spec file.
+///////////////////////////////////////////////////////////////////////////////
+// PLL CONFIGURATION & PARAMETERS for different clock generators:
+//-----------------------------------------------------------------------------
+// Reference frequency 13.0MHz 19.2MHz 12.0MHz 26.0MHz
+// ----------------------------------------------------------------------------
+// PLLU_ENABLE_DLY_COUNT 02 (02h) 03 (03h) 02 (02h) 04 (04h)
+// PLLU_STABLE_COUNT 51 (33h) 75 (4Bh) 47 (2Fh) 102 (66h)
+// PLL_ACTIVE_DLY_COUNT 05 (05h) 06 (06h) 04 (04h) 09 (09h)
+// XTAL_FREQ_COUNT 127 (7Fh) 187 (BBh) 118 (76h) 254 (FEh)
+///////////////////////////////////////////////////////////////////////////////
+// UTMI PLL delay parameters, indexed by NvRmClocksOscFreq.
+static const UsbPllDelayParams s_UsbPllDelayParams[NvRmClocksOscFreq_Num] =
+{
+    //ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT
+    {0x02, 0x33, 0x05, 0x7F}, // For NvRmClocksOscFreq_13_MHz,
+    {0x03, 0x4B, 0x06, 0xBB}, // For NvRmClocksOscFreq_19_2_MHz
+    {0x02, 0x2F, 0x04, 0x76}, // For NvRmClocksOscFreq_12_MHz
+    {0x04, 0x66, 0x09, 0xFE} // For NvRmClocksOscFreq_26_MHz
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// USB Debounce values IdDig, Avalid, Bvalid, VbusValid, VbusWakeUp, and SessEnd.
+// Each of these signals have their own debouncer and for each of those one out
+// of 2 debouncing times can be chosen (BIAS_DEBOUNCE_A or BIAS_DEBOUNCE_B.)
+//
+// The values of DEBOUNCE_A and DEBOUNCE_B are calculated as follows:
+// 0xffff -> No debouncing at all
+// <n> ms = <n> *1000 / (1/19.2MHz) / 4
+// So to program a 1 ms debounce for BIAS_DEBOUNCE_A, we have:
+// BIAS_DEBOUNCE_A[15:0] = 1000 * 19.2 / 4 = 4800 = 0x12c0
+// We need to use only DebounceA, We dont need the DebounceB
+// values, so we can keep those to default.
+///////////////////////////////////////////////////////////////////////////////
+// BIAS_DEBOUNCE_A values, indexed by NvRmClocksOscFreq.
+static const NvU32 s_UsbBiasDebounceATime[NvRmClocksOscFreq_Num] =
+{
+    /* Ten milli second delay for BIAS_DEBOUNCE_A */
+    0x7EF4, // For NvRmClocksOscFreq_13_MHz,
+    0xBB80, // For NvRmClocksOscFreq_19_2_MHz
+    0x7530, // For NvRmClocksOscFreq_12_MHz
+    0xFDE8 // For NvRmClocksOscFreq_26_MHz
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Tracking Length Time: The tracking circuit of the bias cell consumes a
+// measurable portion of the USB idle power To curtail this power consumption
+// the bias pad has added a PD_TRK signal to power down the bias cell. It is
+// estimated that after 20microsec of bias cell operation the PD_TRK signal can
+// be turned high to save power. This can be automated by programming a timing
+// interval as given in the below structure.
+static const NvU32 s_UsbBiasTrkLengthTime[NvRmClocksOscFreq_Num] =
+{
+    /* 20 micro seconds delay after bias cell operation */
+    5, // For NvBootClocksOscFreq_13,
+    7, // For NvBootClocksOscFreq_19_2
+    5, // For NvBootClocksOscFreq_12
+    9 // For NvBootClocksOscFreq_26
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// The following arapb_misc_utmip.spec fields need to be programmed to ensure
+// correct operation of the UTMIP block:
+// Production settings :
+// 'HS_SYNC_START_DLY' : 9,
+// 'IDLE_WAIT' : 17,
+// 'ELASTIC_LIMIT' : 16,
+// All other fields can use the default reset values.
+// Setting the fields above, together with default values of the other fields,
+// results in programming the registers below as follows:
+// UTMIP_HSRX_CFG0 = 0x9168c000
+// UTMIP_HSRX_CFG1 = 0x13
+///////////////////////////////////////////////////////////////////////////////
+//UTMIP Idle Wait Delay
+static const NvU8 s_UtmipIdleWaitDelay = 17;
+//UTMIP Elastic limit
+static const NvU8 s_UtmipElasticLimit = 16;
+//UTMIP High Speed Sync Start Delay
+static const NvU8 s_UtmipHsSyncStartDelay = 9;
+
+// One-shot flag: reset the USB host controller on first use.
+static NvBool s_IsUSBResetRequired = NV_TRUE;
+
+/* Power the TV DAC up or down and select its pixel data source.
+ *
+ * When enabling, Config must point at an NvRmAnalogTvDacConfig whose
+ * Source module (TVO, or Display instance 0/1) and DacAmplitude are
+ * programmed into the APB MISC async TVDAC registers.  When disabling,
+ * the DAC is fully powered down (IDDQ, sleep, comparators off) and the
+ * source mux is parked at TVDAC_OFF; Config is ignored.
+ * Always returns NvSuccess. */
+static NvError
+NvRmPrivTvDcControl( NvRmDeviceHandle hDevice, NvBool enable, NvU32 inst,
+    void *Config, NvU32 ConfigLength )
+{
+    NvRmAnalogTvDacConfig *cfg;
+    NvU32 ctrl, source;
+    NvU32 src_id;
+    NvU32 src_inst;
+
+    NV_ASSERT( ConfigLength == 0 ||
+        ConfigLength == sizeof(NvRmAnalogTvDacConfig) );
+
+    if( enable )
+    {
+        cfg = (NvRmAnalogTvDacConfig *)Config;
+        NV_ASSERT( cfg );
+
+        src_id = NVRM_MODULE_ID_MODULE( cfg->Source );
+        src_inst = NVRM_MODULE_ID_INSTANCE( cfg->Source );
+
+        /* Power-up: leave IDDQ/powerdown/sleep, enable detection and
+         * the per-channel comparators. */
+        ctrl = NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_IDDQ, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_POWERDOWN, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_DETECT_EN, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPR, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPG, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPB, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPR_EN, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPG_EN, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPB_EN, ENABLE );
+
+        /* Pick the DAC input: TVO, or one of the two display heads. */
+        if( src_id == NvRmModuleID_Tvo )
+        {
+            source = NV_DRF_DEF( APB_MISC_ASYNC, TVDACDINCONFIG,
+                DAC_SOURCE, TVO );
+        }
+        else
+        {
+            NV_ASSERT( src_id == NvRmModuleID_Display );
+            if( src_inst == 0 )
+            {
+                source = NV_DRF_DEF( APB_MISC_ASYNC, TVDACDINCONFIG,
+                    DAC_SOURCE, DISPLAY );
+            }
+            else
+            {
+                source = NV_DRF_DEF( APB_MISC_ASYNC, TVDACDINCONFIG,
+                    DAC_SOURCE, DISPLAYB );
+            }
+        }
+
+        source = NV_FLD_SET_DRF_NUM( APB_MISC_ASYNC, TVDACDINCONFIG, DAC_AMPIN,
+            cfg->DacAmplitude, source );
+    }
+    else
+    {
+        /* Power-down: invert every power-up setting and detach the mux. */
+        ctrl = NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_IDDQ, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_POWERDOWN, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_DETECT_EN, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPR, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPG, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_SLEEPB, ENABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPR_EN, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPG_EN, DISABLE )
+            | NV_DRF_DEF( APB_MISC_ASYNC, TVDACCNTL, DAC_COMPB_EN, DISABLE );
+        source = NV_DRF_DEF( APB_MISC_ASYNC, TVDACDINCONFIG,
+            DAC_SOURCE, TVDAC_OFF );
+    }
+
+    NV_REGW( hDevice, NvRmModuleID_Misc, 0, APB_MISC_ASYNC_TVDACCNTL_0,
+        ctrl );
+    NV_REGW( hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_ASYNC_TVDACDINCONFIG_0, source );
+
+    return NvSuccess;
+}
+
+static NvError
+NvRmPrivVideoInputControl( NvRmDeviceHandle hDevice, NvBool enable,
+    NvU32 inst, void *Config, NvU32 ConfigLength )
+{
+    /* Gate the video-input clock pad.  The only programmable state is
+     * the VCLK pad input-enable bit in the APB MISC async registers;
+     * no per-instance or extra configuration is accepted. */
+    NvU32 PadCtrl;
+
+    NV_ASSERT(ConfigLength == 0);
+    NV_ASSERT(Config == 0);
+    NV_ASSERT(inst == 0);
+
+    PadCtrl = enable
+        ? NV_DRF_DEF( APB_MISC_ASYNC, VCLKCTRL, VCLK_PAD_IE, ENABLE )
+        : NV_DRF_DEF( APB_MISC_ASYNC, VCLKCTRL, VCLK_PAD_IE, DISABLE );
+
+    NV_REGW( hDevice, NvRmModuleID_Misc, 0, APB_MISC_ASYNC_VCLKCTRL_0,
+        PadCtrl );
+
+    return NvSuccess;
+}
+
+
+/* Program the ULPI link-mode trimmers in APB_MISC_UTMIP_SPARE_CFG0:
+ * bypass the output pinmux, load the input-data trimmer values, and
+ * toggle the two load-enable bits to latch them. */
+static void
+NvRmPrivUsbfSetUlpiLinkTrimmers(
+    NvRmDeviceHandle hDevice,
+    NvU32 instance)
+{
+    NvU32 RegVal = 0;
+
+    // Bypass the Pin Mux on the ULPI outputs and set the trimmer values for inputs to 3
+    // NOTE(review): the OR below writes 0x0 into both trimmer value
+    // fields (bits 7:5 and 31:29), not 3 as the comments claim --
+    // confirm which is intended against the arapb_misc_utmip spec.
+    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0);
+    RegVal = RegVal & ( ~( (0x1 << 13) | (0xf << 4) | (0xf << 28) ) );
+    //bit 13 : data output pinmux bypass enable: set to 1
+    //bit 4 : data input trimmer load enable (toggle)
+    //bit [7:5] : data input trimmer value: set to 3
+    //bit 28 : data input trimmer2 load enable (toggle)
+    //bit [31:29] : data input trimmer2 value: set to 3
+    RegVal = RegVal | ( (0x1 << 13) | (0x0 << 5) | (0x0 << 4) | (0x0 << 29) | (0x0 << 28) );
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0, RegVal);
+
+    // wait 10 us
+    NvOsWaitUS(10);
+    // toggle bits 4 and 28 to latch the trimmer values
+    RegVal = RegVal | ( (0x1 << 4) | (0x1 << 28) );
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0, RegVal);
+    // wait 10 us
+    NvOsWaitUS(10);
+}
+
+
+/* Program the ULPI null-PHY clock source and input trimmers in
+ * APB_MISC_UTMIP_SPARE_CFG0, latching the trimmer values by toggling
+ * the two load-enable bits. */
+static void
+NvRmPrivUsbfSetUlpiNullTrimmers(
+    NvRmDeviceHandle hDevice,
+    NvU32 instance)
+{
+    NvU32 RegVal = 0;
+    // Configure 60M clock for USB2 - ULPI controller
+
+    // Set up to use PLLU at 60 MHz and keep USB PHY PLL in reset
+    // bit 27 : bypass 60 MHz Div5 for PLLU - set to 1
+    // bit 9 : nullphy_pll_source - use USB_PHY_PLL output (set to 0) :
+    // Workaround: set to 1 to use PLLU Output at 12 MHz
+
+    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_UTMIP_SPARE_CFG0_0);
+    RegVal &= (~( 0x1 << 27) );
+    RegVal |= ((0x1 << 9));
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0,
+        RegVal);
+
+    // Set the trimmers
+    // bit 4 : data input trimmer load enable (toggle)
+    // bit [7:5] : data input trimmer value - set to 3
+    // bit 28 : data input trimmer2 load enable - (toggle)
+    // bit [31:29] : data input trimmer2 value - set to 3
+    // NOTE(review): the OR below writes 0x7 into both value fields,
+    // not 3 as the comments above claim -- confirm against the spec.
+    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0);
+    RegVal = RegVal | ( (0x7 << 29) | (0x0 << 28) | (0x7 << 5) | (0x0 << 4) );
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0, RegVal);
+
+    // wait 10 us
+    NvOsWaitUS(10);
+    // toggle bits 4 and 28 to latch the trimmer values
+    RegVal = RegVal | ( (0x1 << 4) | (0x1 << 28) );
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0, RegVal);
+    // wait 10 us
+    NvOsWaitUS(10);
+}
+
+
+/* Enable or disable the USB2 (ULPI) controller clock and reset.
+ *
+ * Enable: ungate the USB2 clock and release both the module reset (APB
+ * MISC) and the USBD block reset (clock-and-reset controller, under the
+ * CAR mutex), then rewrite the UTMIP spare config word.  Disable: gate
+ * the clock and assert the module reset; the USBD block reset is only
+ * re-asserted when USB1 (MISC_USB_CE) is also off, since it is shared. */
+static void
+NvRmPrivUsbfUlpiClockControl(
+    NvRmDeviceHandle hDevice,
+    NvU32 instance,
+    NvBool Enable)
+{
+    NvU32 RegVal = 0;
+
+    if (Enable)
+    {
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+            MISC_USB2_RST, DISABLE, RegVal);
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+            MISC_USB2_CE, ENABLE, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+
+        NvOsMutexLock(hDevice->CarMutex);
+        // Bring Out of reset
+        RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+            CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+        RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+            SWR_USBD_RST, DISABLE, RegVal);
+        NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+            CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+
+        NvOsMutexUnlock(hDevice->CarMutex);
+
+        /* Rewrite the UTMIP spare config: keep only bits 3:0, then set
+         * the ULPI pinmux-bypass/clock-source bits (9, 10, 11, 13, 14).
+         * The zero shifts spell out the fields deliberately left clear. */
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_SPARE_CFG0_0);
+        RegVal = RegVal & (0xf);
+
+        RegVal = RegVal | ( (0x0 << 29) | (0x0 << 28) | (0x0 << 27) | (0 << 22) |
+                            (0x0 << 21) | (0x0 << 16) | (0x0 << 15) | (0x1 << 14) |
+                            (0x1 << 13) | (0x0 << 12) | (0x1 << 11) | (0x1 << 10) |
+                            (0x1 << 9) | (0x0 << 5) | (0x0 << 4) );
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_SPARE_CFG0_0, RegVal);
+
+        // wait 10 us
+        NvOsWaitUS(10);
+    }
+    else
+    {
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+            MISC_USB2_RST, ENABLE, RegVal);
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+            MISC_USB2_CE, DISABLE, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+
+        /* Shared USBD reset: only assert it if USB1 is also disabled. */
+        if (!(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB_CE, RegVal)))
+        {
+            // Enable reset
+            NvOsMutexLock(hDevice->CarMutex);
+            RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+            RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+                SWR_USBD_RST, ENABLE, RegVal);
+            NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+                CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+            NvOsMutexUnlock(hDevice->CarMutex);
+        }
+    }
+}
+
+static void
+NvRmPrivUsbfEnableVbusInterrupt(
+    NvRmDeviceHandle hDevice)
+{
+    /* Arm the A-session-valid (VBUS) interrupt so cable attach can be
+     * detected while the USB controller itself is powered off. */
+    NvU32 Sensors;
+
+    Sensors = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_SENSORS_0);
+    Sensors = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_SENSORS,
+        A_SESS_VLD_INT_EN, ENABLE, Sensors);
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_SENSORS_0, Sensors);
+}
+
+
+static void
+NvRmPrivUsbfDisableVbusInterrupt(
+    NvRmDeviceHandle hDevice)
+{
+    /* Mask the A-session-valid (VBUS) interrupt and write the
+     * change-detect bit to acknowledge any pending event. */
+    NvU32 Sensors;
+
+    Sensors = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_SENSORS_0);
+    Sensors = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_SENSORS,
+        A_SESS_VLD_INT_EN, DISABLE, Sensors);
+    Sensors = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_SENSORS,
+        A_SESS_VLD_CHG_DET, SET, Sensors);
+    NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_SENSORS_0, Sensors);
+}
+
+
+static NvBool
+NvRmPrivUsbfIsCableConnected(
+    NvRmDeviceHandle hDevice)
+{
+    /* Sample the VBUS A-session-valid status to decide whether a USB
+     * cable is attached.  The VBUS interrupt is always acknowledged and
+     * masked after sampling, then re-armed only while no cable is
+     * present so the next attach still raises an event. */
+    NvU32 Sensors = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_SENSORS_0);
+    NvBool Attached =
+        NV_DRF_VAL(APB_MISC, PP_USB_PHY_VBUS_SENSORS, A_SESS_VLD_STS, Sensors)
+        ? NV_TRUE : NV_FALSE;
+
+    NvRmPrivUsbfDisableVbusInterrupt(hDevice);
+
+    if (!Attached)
+    {
+        NvRmPrivUsbfEnableVbusInterrupt(hDevice);
+    }
+
+    return Attached;
+}
+
+static NvError
+NvRmPrivUsbfWaitForPhyClock(
+    NvRmDeviceHandle hDevice,
+    NvBool Enable)
+{
+    /* Poll the PHY clock-valid status bit until it reaches the
+     * requested state (SET when enabling, UNSET when disabling) or the
+     * ~100 ms budget of 1 us polls runs out.  The original ordering is
+     * preserved: the timeout check and 1 us delay happen after each
+     * register sample, before the sample is compared to the target. */
+    const NvU32 Target = Enable
+        ? APB_MISC_PP_MISC_USB_OTG_0_PCLKVLD_SET
+        : APB_MISC_PP_MISC_USB_OTG_0_PCLKVLD_UNSET;
+    NvU32 Remaining = 100000; // 1 us polls -> ~100 ms before giving up
+
+    for (;;)
+    {
+        NvU32 Reg = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_MISC_USB_OTG_0);
+        NvU32 Valid = NV_DRF_VAL(APB_MISC_PP, MISC_USB_OTG, PCLKVLD, Reg);
+        if (Remaining == 0)
+        {
+            return NvError_Timeout;
+        }
+        NvOsWaitUS(1);
+        Remaining--;
+        if (Valid == Target)
+        {
+            return NvSuccess;
+        }
+    }
+}
+
+/* Report whether a dedicated/battery charger is attached.
+ *
+ * When EnableDetection is true, the UTMIP charger-detection circuit
+ * (OP_SRC/ON_SINK) is switched on first and given the 100 ms source-on
+ * time from the USB battery-charging spec before sampling.  When a
+ * charger is seen, its interrupt is masked/acknowledged and the
+ * detection circuit is switched back off before returning. */
+static NvBool
+NvRmPrivUsbfIsChargerDetected(
+    NvRmDeviceHandle hDevice, NvBool EnableDetection)
+{
+    NvU32 RegVal = 0;
+    NvBool ChargerConnected = NV_FALSE;
+    #define TDP_SRC_ON_MS 100
+
+    if (EnableDetection)
+    {
+        // Enable charger detection logic
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_OP_SRC_EN, 1, RegVal);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_ON_SINK_EN, 1, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+        // Source should be on for 100 ms as per USB charging spec
+        NvOsSleepMS(TDP_SRC_ON_MS);
+    }
+
+    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+        APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+    if (NV_DRF_VAL(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID, VDAT_DET_STS, RegVal))
+    {
+        //disable the interrupt, if we detect the charger
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID,
+            VDAT_DET_INT_EN, DISABLE, RegVal);
+        RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID,
+            VDAT_DET_CHG_DET, SET, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+        ChargerConnected = NV_TRUE;
+        // Disable charger detection logic
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_OP_SRC_EN, 0, RegVal);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_ON_SINK_EN, 0, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+
+    }
+
+    return ChargerConnected;
+}
+
+/* Turn the UTMIP charger-detection circuit on or off.
+ *
+ * On enable: power the OP_SRC/ON_SINK detection logic, wait the 100 ms
+ * source-on time from the USB battery-charging spec, then either arm
+ * the VDAT_DET interrupt (charger already present) or mask/acknowledge
+ * it (no charger).  On disable: power the detection logic down and wait
+ * the 40 ms connect delay before D+ may be pulled up. */
+static void
+NvRmPrivUsbfChargerDetection(
+    NvRmDeviceHandle hDevice,
+    NvBool Enable)
+{
+    // These values (in milli second) are taken from the battery charging spec.
+    #define TDP_SRC_ON_MS 100
+    #define TDPSRC_CON_MS 40
+    NvU32 RegVal = 0;
+
+    if (Enable)
+    {
+        // Enable charger detection logic
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_OP_SRC_EN, 1, RegVal);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_ON_SINK_EN, 1, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+        // Source should be on for 100 ms as per USB charging spec
+        NvOsSleepMS(TDP_SRC_ON_MS);
+        // Check if charger is connected, enable interrupt to get the event
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+        if (NV_DRF_VAL(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID, VDAT_DET_STS, RegVal))
+        {
+            /* Charger present: arm the VDAT_DET interrupt.  Note this
+             * write sets only the INT_EN field; other fields get the
+             * register's zero/default encoding. */
+            NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+                APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0,
+                NV_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID,
+                VDAT_DET_INT_EN, ENABLE));
+        }
+        else
+        {
+            // If charger is not connected disable the interrupt
+            RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID,
+                VDAT_DET_INT_EN, DISABLE, RegVal);
+            RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID,
+                VDAT_DET_CHG_DET, SET, RegVal);
+            NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+                APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+        }
+    }
+    else
+    {
+        // Disable charger detection logic
+        RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_OP_SRC_EN, 0, RegVal);
+        RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+            UTMIP_ON_SINK_EN, 0, RegVal);
+        NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+            APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+        // Delay of 40 ms before we pull the D+ as per battery charger spec.
+        NvOsSleepMS(TDPSRC_CON_MS);
+    }
+}
+
+
+// Bring up the UTMIP PHY on the third USB controller
+// (NvRmModuleID_Usb2Otg, instance 2). The sequence is order-sensitive:
+// hold the PHY in reset, power up transceiver/bias circuitry, program the
+// PLL / debounce / HS-receiver timing for the detected oscillator
+// frequency, release reset, poll for a valid PHY clock, disable the ICUSB
+// interface, and finally select the UTMI PHY in PORTSC1.
+//
+//  hDevice - RM device handle used for register access.
+// Returns NvError_NotSupported when the primary oscillator frequency is
+// not found in s_RmOscFrequecy; otherwise NvSuccess.
+static NvError
+NvRmPrivUsb3ConfigureUtmipPhy(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+ NvU32 TimeOut = 100000; // 100 milli seconds timeout before H/W gives up;
+ NvU32 PhyClkValid = 0;
+ NvRmFreqKHz OscFreqKz = 0;
+ NvU32 FreqIndex;
+
+ // Get the Oscillator Frequency
+ OscFreqKz = NvRmPowerGetPrimaryFrequency(hDevice);
+
+ // Get the Oscillator Frequency Index
+ for (FreqIndex = 0; FreqIndex < NvRmClocksOscFreq_Num; FreqIndex++)
+ {
+ if (OscFreqKz == s_RmOscFrequecy[FreqIndex])
+ {
+ // Bail Out if frequecy matches with the supported frequency
+ break;
+ }
+ }
+ // If Index is equal to the maximum supported frequency count
+ // There is a mismatch of the frequecy, so returning since the
+ // frequency is not supported.
+ if (FreqIndex >= NvRmClocksOscFreq_Num)
+ {
+ return NvError_NotSupported;
+ }
+
+
+ /**Hold UTMIP3 PHY in reset by writing UTMIP_RESET bit in USB3_IF_USB_SUSP_CTRL
+ register to 1. **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB3_IF, USB_SUSP_CTRL, UTMIP_RESET, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0, RegVal);
+
+
+ /*1. FORCE_PD_POWERDOWN, FORCE_PD2_POWERDOWN, FORCE_PDZI_POWERDOWN fields in
+ UTMIP_XCVR_CFG0 register. **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_XCVR_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG0, UTMIP_FORCE_PD_POWERDOWN,
+ 0x0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG0, UTMIP_FORCE_PD2_POWERDOWN,
+ 0x0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG0, UTMIP_FORCE_PDZI_POWERDOWN,
+ 0x0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_XCVR_CFG0_0, RegVal);
+
+
+ // USB Power Up sequence
+ // Power Up OTG and Bias circuitry
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_OTGPD, 0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_BIASPD, 0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0, RegVal);
+
+ /**OTGOD and BIASPD fields in UTMIP_BIAS_CFG0 register. **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_BIAS_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, BIAS_CFG0, UTMIP_OTGPD,
+ 0x0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, BIAS_CFG0, UTMIP_BIASPD,
+ 0x0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_BIAS_CFG0_0, RegVal);
+
+ /* FORCE_PDDISC_POWERDOWEN , FORCE_PDCHRP_POWERDOWN, FORCE_PDDR_POWERDOWN
+ field in UTMIP_XCVR_CFG1 register. **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_XCVR_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG1, UTMIP_FORCE_PDDISC_POWERDOWN,
+ 0x0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG1, UTMIP_FORCE_PDCHRP_POWERDOWN,
+ 0x0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, XCVR_CFG1, UTMIP_FORCE_PDDR_POWERDOWN,
+ 0x0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_XCVR_CFG1_0, RegVal);
+
+ /**Enable UTMIP3 interface by setting UTMIP_PHY_ENB in USB3_IF_USB_SUSP_CTRL
+ register to 1. **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB3_IF, USB_SUSP_CTRL, UTMIP_PHY_ENB, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0, RegVal);
+
+ // PLL stable/active delay counts are per-oscillator-frequency table values.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_MISC_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, MISC_CFG1,
+ UTMIP_PLLU_STABLE_COUNT,
+ s_UsbPllDelayParams[FreqIndex].StableCount, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, MISC_CFG1,
+ UTMIP_PLL_ACTIVE_DLY_COUNT,
+ s_UsbPllDelayParams[FreqIndex].ActiveDelayCount, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_MISC_CFG1_0, RegVal);
+
+
+ // Set PLL enable delay count and Crystal frequency count
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_PLL_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, PLL_CFG1,
+ UTMIP_PLLU_ENABLE_DLY_COUNT,
+ s_UsbPllDelayParams[FreqIndex].EnableDelayCount, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP,
+ PLL_CFG1, UTMIP_XTAL_FREQ_COUNT,
+ s_UsbPllDelayParams[FreqIndex].XtalFreqCount, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_PLL_CFG1_0, RegVal);
+
+ // Program 1ms Debounce time for VBUS to become valid.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_DEBOUNCE_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, DEBOUNCE_CFG0, UTMIP_BIAS_DEBOUNCE_A,
+ s_UsbBiasDebounceATime[FreqIndex], RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_DEBOUNCE_CFG0_0, RegVal);
+
+ /** pll_parameters_configured **/
+
+ // Configure the UTMIP_HS_SYNC_START_DLY
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_HSRX_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, HSRX_CFG1,
+ UTMIP_HS_SYNC_START_DLY,
+ s_UtmipHsSyncStartDelay, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_HSRX_CFG1_0, RegVal);
+
+ /* Configure the UTMIP_IDLE_WAIT and UTMIP_ELASTIC_LIMIT
+ * Setting these fields, together with default values of the other
+ * fields, results in programming the registers below as follows:
+ * UTMIP_HSRX_CFG0 = 0x9168c000
+ * UTMIP_HSRX_CFG1 = 0x13
+ */
+
+ // Program HS receiver idle wait and elastic limit.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_HSRX_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP, HSRX_CFG0,
+ UTMIP_IDLE_WAIT,
+ s_UtmipIdleWaitDelay, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB3_UTMIP,
+ HSRX_CFG0, UTMIP_ELASTIC_LIMIT,
+ s_UtmipElasticLimit, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_UTMIP_HSRX_CFG0_0, RegVal);
+
+ /**Release reset to UTMIP3 by writing 0 to UTMIP_RESET field in
+ USB3_IF_USB_SUSP_CTRL register. ***/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB3_IF, USB_SUSP_CTRL, UTMIP_RESET, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0, RegVal);
+
+
+ /**Wait until PHY clock comes up by checking for USB_PHY_CLK_VALID bit in
+ USB3_IF_USB_SUSP_CTRL register **/
+ // Polls in 1 us steps for up to TimeOut (~100 ms).
+ // NOTE(review): if the poll times out the loop just breaks and the
+ // function still returns NvSuccess below -- confirm callers do not need
+ // a timeout error reported here.
+ do
+ {
+ //Wait for the phy clock to become valid
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0);
+ PhyClkValid = NV_DRF_VAL(USB3_IF, USB_SUSP_CTRL, USB_PHY_CLK_VALID, RegVal);
+
+ if (!TimeOut)
+ {
+ break;
+ }
+ NvOsWaitUS(1);
+ TimeOut--;
+ } while (!PhyClkValid);
+
+ /* We can only do this once PHY clock is up. Disable ICUSB interface (it is enabled by default) */
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB2_CONTROLLER_1_USB2D_ICUSB_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ICUSB_CTRL, IC_ENB1, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB2_CONTROLLER_1_USB2D_ICUSB_CTRL_0, RegVal);
+
+ /**Program the USB3 controller to use UTMIP3 PHY by setting the PTS field in
+ USB2_CONTROLLER_USB2D_PORTSC1 register to UTMIP (2'b00). **/
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB2_CONTROLLER_USB2D_PORTSC1_0);
+
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER, USB2D_PORTSC1, PTS, UTMI, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER, USB2D_PORTSC1, STS, PARALLEL_IF, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 2, USB2_CONTROLLER_USB2D_PORTSC1_0, RegVal);
+
+ return NvSuccess;
+}
+
+
+// Configure the UTMIP PHY timing for the USB function controller through
+// the APB_MISC aperture: FS preamble, HS receiver idle-wait / elastic
+// limit, HS sync start delay, VBUS debounce time, and the PLL
+// stable/active/enable delay counts, all indexed by the detected
+// oscillator frequency. On FPGA platforms, VBUS sensing is switched to
+// A_SESS_VLD from the external UTMIP PHY.
+//
+//  hDevice - RM device handle used for register access.
+// Returns NvError_NotSupported when the primary oscillator frequency is
+// not found in s_RmOscFrequecy; NvSuccess otherwise.
+static NvError
+NvRmPrivUsbfConfigureUtmipPhy(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+ NvRmFreqKHz OscFreqKz = 0;
+ NvU32 FreqIndex;
+
+ // Get the Oscillator Frequency
+ OscFreqKz = NvRmPowerGetPrimaryFrequency(hDevice);
+
+ // Get the Oscillator Frequency Index
+ for (FreqIndex = 0; FreqIndex < NvRmClocksOscFreq_Num; FreqIndex++)
+ {
+ if (OscFreqKz == s_RmOscFrequecy[FreqIndex])
+ {
+ // Bail Out if frequecy matches with the supported frequency
+ break;
+ }
+ }
+ // If Index is equal to the maximum supported frequency count
+ // There is a mismatch of the frequecy, so returning since the
+ // frequency is not supported.
+ if (FreqIndex >= NvRmClocksOscFreq_Num)
+ {
+ return NvError_NotSupported;
+ }
+
+ // Program the full-speed preamble-J count.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_TX_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_TX_CFG0, UTMIP_FS_PREAMBLE_J,
+ 0x1, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_TX_CFG0_0, RegVal);
+
+ // Configure the UTMIP_IDLE_WAIT and UTMIP_ELASTIC_LIMIT
+ // Setting these fields, together with default values of the other
+ // fields, results in programming the registers below as follows:
+ // UTMIP_HSRX_CFG0 = 0x9168c000
+ // UTMIP_HSRX_CFG1 = 0x13
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_HSRX_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_HSRX_CFG0, UTMIP_IDLE_WAIT,
+ s_UtmipIdleWaitDelay, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_HSRX_CFG0, UTMIP_ELASTIC_LIMIT,
+ s_UtmipElasticLimit, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_HSRX_CFG0_0, RegVal);
+
+ // Configure the UTMIP_HS_SYNC_START_DLY
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_HSRX_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_HSRX_CFG1,
+ UTMIP_HS_SYNC_START_DLY, s_UtmipHsSyncStartDelay, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_HSRX_CFG1_0, RegVal);
+
+ // Program 1ms Debounce time for VBUS to become valid.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_DEBOUNCE_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_DEBOUNCE_CFG0,
+ UTMIP_BIAS_DEBOUNCE_A,s_UsbBiasDebounceATime[FreqIndex],
+ RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_DEBOUNCE_CFG0_0, RegVal);
+
+ // PLL Delay CONFIGURATION settings
+ // The following parameters control the bring up of the plls:
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_MISC_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_MISC_CFG0,
+ UTMIP_SUSPEND_EXIT_ON_EDGE, 0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_MISC_CFG0_0, RegVal);
+
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_MISC_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_MISC_CFG1,
+ UTMIP_PLLU_STABLE_COUNT,
+ s_UsbPllDelayParams[FreqIndex].StableCount, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_MISC_CFG1,
+ UTMIP_PLL_ACTIVE_DLY_COUNT,
+ s_UsbPllDelayParams[FreqIndex].ActiveDelayCount, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_MISC_CFG1_0, RegVal);
+
+ // Set PLL enable delay count and Crystal frequency count
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_PLL_CFG1_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_PLL_CFG1,
+ UTMIP_PLLU_ENABLE_DLY_COUNT,
+ s_UsbPllDelayParams[FreqIndex].EnableDelayCount, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC,
+ UTMIP_PLL_CFG1, UTMIP_XTAL_FREQ_COUNT,
+ s_UsbPllDelayParams[FreqIndex].XtalFreqCount, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_PLL_CFG1_0, RegVal);
+
+ // On AP20 FPGA we do not have VBUS_WAKEUP signal for cable detection.
+ // We use A_SESS_VLD that comes from the external UTMIP PHY
+ if (NvRmPrivGetExecPlatform(hDevice) == ExecPlatform_Fpga)
+ {
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_USB_PHY_PARAM_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_PARAM, VS_CTL, A_SESS_VLD, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_USB_PHY_PARAM_0, RegVal);
+ }
+ return NvSuccess;
+}
+
+// Power the USB function controller's UTMIP PHY up or down.
+//
+//  hDevice  - RM device handle used for register access.
+//  Instance - USB instance; used to query the ODM USB properties on the
+//             power-down path.
+//  Enable   - NV_TRUE for power-up, NV_FALSE for power-down.
+//
+// Power-up: enables the OTG/bias circuits, reads the UTMIP_XCVR_SETUP eye
+// trim from spare fuses once (result cached across calls in the static
+// s_XcvrSetupValue), powers the transceiver, enables the battery-charge
+// circuit, and releases the controller reset (with an extra APB_MISC
+// clock/reset path on chip id 0x16).
+// Power-down: reverses the sequence -- the OTG bias is kept powered when
+// the ODM says the internal PHY handles cable-insertion wakeup -- puts the
+// controller back in reset, and polls for B-session end.
+static void
+NvRmPrivUsbfPowerControl(
+ NvRmDeviceHandle hDevice,
+ NvU32 Instance,
+ NvBool Enable)
+{
+ NvU32 RegVal = 0;
+ const NvOdmUsbProperty *pUsbProperty = NULL;
+ NvU32 TimeOut = 100000; // 100 milli seconds timeout before H/W gives up;
+ // Fuse-derived XCVR_SETUP trim, read once per boot and cached.
+ static NvU32 s_XcvrSetupValue = 0;
+ static NvBool s_ReadFuseValue = NV_FALSE;
+
+ if (Enable)
+ {
+ // USB Power Up sequence
+ // Power Up OTG and Bias circuitry
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_OTGPD, 0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_BIASPD, 0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0, RegVal);
+
+ if (!s_ReadFuseValue)
+ {
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ NvRmPowerModuleClockControl(hDevice, NvRmModuleID_Fuse, 0, NV_TRUE);
+#endif
+ // Enable fuse values to be visible before reading the fuses.
+ RegVal = NV_REGR( hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0 );
+ RegVal = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 1, RegVal );
+ NV_REGW( hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, RegVal );
+
+ // Read the spare register fuses and redundancy fuses for setting up USB
+ // UTMIP_XCVR_SETUP value for proper EYE diagram.
+ RegVal = NV_REGR( hDevice, NvRmModuleID_Fuse, 0, FUSE_FUSEDATA21_0);
+
+ // Assemble a 3-bit value: each bit ORs a primary spare bit with its
+ // redundancy alias (10|13 -> bit0, 11|14 -> bit1, 12|15 -> bit2).
+ s_XcvrSetupValue = (NV_DRF_VAL(FUSE, FUSEDATA21,FUSEDATA_SPARE_BIT_10__PRI_ALIAS_0, RegVal) |
+ NV_DRF_VAL(FUSE, FUSEDATA21, FUSEDATA_SPARE_BIT_13__PRI_ALIAS_0, RegVal)) << 0;
+ s_XcvrSetupValue |= (NV_DRF_VAL(FUSE, FUSEDATA21, FUSEDATA_SPARE_BIT_11__PRI_ALIAS_0, RegVal) |
+ NV_DRF_VAL(FUSE, FUSEDATA21, FUSEDATA_SPARE_BIT_14__PRI_ALIAS_0, RegVal)) << 1;
+ s_XcvrSetupValue |= (NV_DRF_VAL(FUSE, FUSEDATA21, FUSEDATA_SPARE_BIT_12__PRI_ALIAS_0, RegVal) |
+ NV_DRF_VAL(FUSE, FUSEDATA21, FUSEDATA_SPARE_BIT_15__PRI_ALIAS_0, RegVal)) << 2;
+ // Only UTMIP_XCVR_SETUP[3-1] need to be programmed with the fuse vlaue
+ // UTMIP_XCVR_SETUP[0] must be equal to 0
+ s_XcvrSetupValue = s_XcvrSetupValue << 1;
+
+ // Disable fuse values visibility, we already read the data
+ RegVal = NV_REGR( hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0 );
+ RegVal = NV_FLD_SET_DRF_NUM( CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 0, RegVal );
+ NV_REGW( hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, RegVal );
+ s_ReadFuseValue = NV_TRUE;
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ NvRmPowerModuleClockControl(hDevice, NvRmModuleID_Fuse, 0, NV_FALSE);
+#endif
+ }
+
+ //NvOsDebugPrintf("s_XcvrSetupValue from fuse [0x%x] \n", s_XcvrSetupValue);
+
+ // Turn on power in the tranciver
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_XCVR_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PDZI_POWERDOWN, 0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PD2_POWERDOWN, 0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PD_POWERDOWN, 0, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_XCVR_SETUP, s_XcvrSetupValue, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_XCVR_CFG0_0, RegVal);
+
+ // Enable Batery charge enabling bit, set to '0' for enable
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+ UTMIP_PD_CHRG, 0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+
+ // Chip id 0x16 additionally routes USB clock/reset control through
+ // APB_MISC and may require a one-time controller reset first.
+ if (hDevice->ChipId.Id == 0x16)
+ {
+ if(s_IsUSBResetRequired)
+ {
+ NvOsMutexLock(hDevice->CarMutex);
+ // Put the controller in reset
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ s_IsUSBResetRequired = NV_FALSE;
+ }
+
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB_CE, ENABLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB_RST, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+ }
+ NvOsMutexLock(hDevice->CarMutex);
+ // Bring Out of reset
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+
+ }
+ else
+ {
+ // USB Power down sequence
+ // Power down OTG and Bias circuitry
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0);
+ // Query if Internal Phy is going to wake up the usb controller upon cable insertion.
+ pUsbProperty = NvOdmQueryGetUsbProperty(NvOdmIoModule_Usb, Instance);
+ if (!pUsbProperty->UseInternalPhyWakeup)
+ {
+ /// If not internal Phy then Use PMU interrupt for VBUS detection.
+ /// Disable the OTG bias circuitry.
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_OTGPD, 1, RegVal);
+ }
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BIAS_CFG0,
+ UTMIP_BIASPD, 1, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_BIAS_CFG0_0, RegVal);
+
+ // Disable Batery charge enabling bit set to '1' for disable
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_BAT_CHRG_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_BAT_CHRG_CFG0,
+ UTMIP_PD_CHRG, 1, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_UTMIP_BAT_CHRG_CFG0_0, RegVal);
+
+ // Turn off power in the tranciver
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_XCVR_CFG0_0);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PDZI_POWERDOWN, 1, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PD2_POWERDOWN, 1, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(APB_MISC, UTMIP_XCVR_CFG0,
+ UTMIP_FORCE_PD_POWERDOWN, 1, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_UTMIP_XCVR_CFG0_0, RegVal);
+
+ if (hDevice->ChipId.Id == 0x16)
+ {
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB_RST, ENABLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB_CE, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+ // Only assert the CAR reset when the second controller's clock is
+ // also off (MISC_USB2_CE clear).
+ if (!(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB2_CE, RegVal)))
+ {
+ // Enable reset
+ NvOsMutexLock(hDevice->CarMutex);
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ }
+ }
+ else
+ {
+ // Enable reset
+ NvOsMutexLock(hDevice->CarMutex);
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ }
+ // Wait till B Session end
+ // Polls in 1 us steps for up to TimeOut (~100 ms); each pass first
+ // clears the A_SESS_VLD change-detect status, then checks
+ // B_SESS_END_STS. Times out silently if the session never ends.
+ do {
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_SENSORS_0,
+ NV_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_SENSORS,
+ A_SESS_VLD_CHG_DET, SET));
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_SENSORS_0);
+ if (NV_DRF_VAL(APB_MISC_PP, USB_PHY_VBUS_SENSORS, B_SESS_END_STS, RegVal))
+ {
+ // break here once the B Session end apears.
+ break;
+ }
+ NvOsWaitUS(1);
+ TimeOut--;
+ } while (TimeOut);
+ }
+}
+
+#if 0
+// Dead code (compiled out): masks the charger VDAT-detect interrupt and
+// clears its change-detect status. The same register sequence exists in
+// the "no charger" branch of NvRmPrivUsbfChargerDetection; kept here for
+// reference only.
+static void
+NvRmPrivUsbfDisableChargerInterrupt(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+
+ // disable the charger Interrupt
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID, VDAT_DET_INT_EN, DISABLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC, PP_USB_PHY_VBUS_WAKEUP_ID, VDAT_DET_CHG_DET, SET, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0, APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+}
+#endif
+
+static void NvRmUsbPrivConfigureUsbPhy(
+ NvRmDeviceHandle hDevice,
+ NvBool Enable)
+ {
+ NvU32 RegVal;
+ NvU32 TimeOut = 1000;
+ NvU32 PhyClkValid = 0;
+ NvU32 UlpiRunBit = 1;
+ //NvU32 ReadValue = 0;
+ //NvU32 i;
+
+ if(Enable)
+ {
+
+ if(s_IsUSBResetRequired)
+ {
+ // If USB1 is active by this time by KITL, do not do a car reset
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ if (!(NV_DRF_VAL(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB_CE, RegVal)))
+ {
+ NvOsMutexLock(hDevice->CarMutex);
+ // Put the controller in reset
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ }
+ s_IsUSBResetRequired = NV_FALSE;
+ }
+ // Bring controller out of reset
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB2_RST, DISABLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB2_CE, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+
+ NvOsMutexLock(hDevice->CarMutex);
+ // Bring Out of reset
+ RegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0);
+ RegVal = NV_FLD_SET_DRF_DEF(CLK_RST_CONTROLLER, RST_DEVICES_L,
+ SWR_USBD_RST, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_RST_DEVICES_L_0, RegVal);
+ NvOsMutexUnlock(hDevice->CarMutex);
+ // Wake-up ULPI PHY generate a postive pulse
+ //set
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_IF_USB_SUSP_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_IF, USB_SUSP_CTRL, USB_SUSP_CLR, SET,
+ RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_IF_USB_SUSP_CTRL_0, RegVal);
+
+ // wait 10 us
+ NvOsWaitUS(100);
+
+ // clear
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_IF_USB_SUSP_CTRL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_IF, USB_SUSP_CTRL,
+ USB_SUSP_CLR, UNSET, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_IF_USB_SUSP_CTRL_0, RegVal);
+
+ // Set the MISC_USB2_CLK_OVR_ON bit and update PP_MISC_USB_CLK_RST_CTL register.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL, MISC_USB2_CLK_OVR_ON,
+ ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+
+
+ // Setting the ULPI register IndicatorPassThru to 1
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x8, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x40, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ // Setting ULPI register UseExternalVbusIndicator to 1.
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0xB, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x80, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ }
+ else
+ {
+ // Programming the ULPI register functuion control
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x4, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x4d, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+ // Resetting the ULPI register IndicatorPassThru
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x7, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x0, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ // Resetting ULPI register UseExternalVbusIndicator
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0xa, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x86, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ // making sure vbus comparator and id are off
+ // USB Interrupt Rising
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x0d, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x00, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ // USB Interrupt Falling
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x10, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x00, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x19, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x00, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ // Disabling ID float Rise/Fall (Carkit Enable)
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x1D, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x00, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, WRITE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, 0x39, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_DATA_WR, 0x00, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+#if 0
+ // STARTING register
+ // taking the register dump for all ULPI 3317 register
+ // staring from FCR
+ //Read FCR
+ for (i = 0x4;i < 0x14;i+=0x3)
+ {
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 reg @ 0x%x value %x",i,ReadValue);
+ }
+
+ // USB IL
+ i = 0x14;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB IL)i @ 0x%x value %x",i,ReadValue);
+
+
+ // USB CARKit
+ i = 0x19;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB CARKit)i @ 0x%x value %x",i,ReadValue);
+
+ // USB CARKit IE
+ i = 0x1D;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB CARKit IE)i @ 0x%x value %x",i,ReadValue);
+
+ // USB CARKit IS
+ i = 0x20;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB CARKit IS)i @ 0x%x value %x",i,ReadValue);
+
+ // USB CARKit IL
+ i = 0x21;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB CARKit IL)i @ 0x%x value %x ",i,ReadValue);
+
+
+ // USB I/0
+ i = 0x39;
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_WAKEUP, CLEAR, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RUN, SET, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_RD_WR, READ, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_PORT, SW_DEFAULT, RegVal);
+ RegVal = NV_FLD_SET_DRF_NUM(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT,
+ ULPI_REG_ADDR, i, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0, RegVal);
+
+ UlpiRunBit = 1;
+ do
+ {
+ // check for run bit being cleared..
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_ULPI_VIEWPORT_0);
+
+ UlpiRunBit = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_RUN, RegVal);
+ } while (UlpiRunBit);
+
+ ReadValue = NV_DRF_VAL(USB2_CONTROLLER_1, USB2D_ULPI_VIEWPORT, ULPI_DATA_RD, RegVal);
+
+ NvOsDebugPrintf("USB ULPI 3317 (USB I/O)i @ 0x%x value %x",i,ReadValue);
+
+ //// done with the register dump..
+ // ENDING register dump programming..
+#endif
+
+ // clear WKCN/WKDS/WKOC wake-on events that can cause the USB Controller to
+ // immediately bring the ULPI PHY out of low power mode after setting PHCD
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_PORTSC1_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_USB2D, PORTSC1, WKCN,
+ DISBLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_USB2D, PORTSC1, WKDS,
+ DISBLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_USB2D, PORTSC1, WKOC,
+ DISBLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_PORTSC1_0, RegVal);
+
+ // before disabling clock.. put the phy to low power mode..
+ // enter low power suspend mode
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_PORTSC1_0);
+ RegVal = NV_FLD_SET_DRF_DEF(USB2_CONTROLLER_1, USB2D_PORTSC1, PHCD,
+ ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Usb2Otg, 1,
+ USB2_CONTROLLER_1_USB2D_PORTSC1_0, RegVal);
+
+ // check for the phy in suspend..
+ do
+ {
+ //Wait for the phy clock to stop or invalid
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 1 ,
+ USB2_IF_USB_SUSP_CTRL_0);
+
+ PhyClkValid = NV_DRF_VAL(USB2_IF, USB_SUSP_CTRL, USB_PHY_CLK_VALID,
+ RegVal);
+
+ if (!TimeOut)
+ {
+ break;
+ }
+ NvOsWaitUS(1);
+ TimeOut--;
+ } while (PhyClkValid);
+
+
+ //NvRmAnalogUsbInputParam_ConfigureUsbPhy
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
+ MISC_USB2_CE, DISABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);
+
+ s_IsUSBResetRequired = NV_TRUE;
+ }
+}
+
+
+static void
+NvRmPrivUsbfEnableIdInterrupt(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+
+ //enable ID interrupt for A cable detection
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_WAKEUP_ID,
+ ID_INT_EN, ENABLE, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+}
+
+static void
+NvRmPrivUsbfDisableIdInterrupt(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+
+ //disable the ID interrupt,
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_WAKEUP_ID,
+ ID_INT_EN, DISABLE, RegVal);
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_WAKEUP_ID,
+ ID_CHG_DET, SET, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+}
+
+static NvBool
+NvRmPrivUsbfIsIdSetToLow(
+ NvRmDeviceHandle hDevice)
+{
+ NvU32 RegVal = 0;
+ NvBool IdSetToLow = NV_FALSE;
+
+ // Check for A cable connection
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0);
+ if (!NV_DRF_VAL(APB_MISC_PP, USB_PHY_VBUS_WAKEUP_ID, ID_STS, RegVal))
+ {
+ IdSetToLow = NV_TRUE;
+ }
+
+ //Ack the ID interrupt,
+ RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, USB_PHY_VBUS_WAKEUP_ID,
+ ID_CHG_DET, SET, RegVal);
+ NV_REGW(hDevice, NvRmModuleID_Misc, 0,
+ APB_MISC_PP_USB_PHY_VBUS_WAKEUP_ID_0, RegVal);
+
+ return IdSetToLow;
+}
// Enable the module clock to the second USB controller (USB2) and cycle
// its reset field via the clock/reset control register in the APB_MISC
// aperture, with 10 us settling delays between each step.
static void NvRmUsbPrivEnableUsb2Clock( NvRmDeviceHandle hDevice )
{
    NvU32 RegVal;

    // first enable clocks to USB2 (MISC_USB2_CE) and the shared USB
    // clock enable (MISC_USB_CE)
    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
                APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
    RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
                MISC_USB2_CE, ENABLE, RegVal);
    RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
                MISC_USB_CE, ENABLE, RegVal);
    NV_REGW(hDevice, NvRmModuleID_Misc, 0,
        APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);

    // wait 10 us for the clocks to come up
    NvOsWaitUS(10);

    // drive MISC_USB2_RST to DISABLE
    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
                APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
    RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
                MISC_USB2_RST, DISABLE, RegVal);
    NV_REGW(hDevice, NvRmModuleID_Misc, 0,
        APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);

    // wait 10 us
    NvOsWaitUS(10);

    // NOTE(review): the function ends with MISC_USB2_RST driven back to
    // ENABLE. Confirm against the APB_MISC programming documentation
    // whether ENABLE/DISABLE here means de-assert/assert for this reset
    // field — as written, the final state is MISC_USB2_RST = ENABLE.
    RegVal = NV_REGR(hDevice, NvRmModuleID_Misc, 0,
                APB_MISC_PP_MISC_USB_CLK_RST_CTL_0);
    RegVal = NV_FLD_SET_DRF_DEF(APB_MISC_PP, MISC_USB_CLK_RST_CTL,
                MISC_USB2_RST, ENABLE, RegVal);
    NV_REGW(hDevice, NvRmModuleID_Misc, 0,
        APB_MISC_PP_MISC_USB_CLK_RST_CTL_0, RegVal);

    // wait 10 us
    NvOsWaitUS(10);
}
+
+static NvError
+NvRmPrivUsbfControl(
+ NvRmDeviceHandle hDevice,
+ NvBool Enable,
+ NvU32 inst,
+ void *Config,
+ NvU32 ConfigLength)
+{
+ NvError error = NvSuccess;
+ NvRmAnalogUsbConfig *pUsbConf = NULL;
+ static NvBool s_IsUsb0PhyConfigured = NV_FALSE;
+ static NvBool s_IsUsb2PhyConfigured = NV_FALSE;
+ NvU32 TimeOut = 100000; // 100 milli seconds timeout before H/W gives up;
+ NvU32 PhyClkValid = 0;
+ NvU32 RegVal = 0;
+
+
+ const NvOdmUsbProperty *pUsbProperty = NULL;
+
+ NV_ASSERT(Config);
+
+ pUsbConf = (NvRmAnalogUsbConfig *)Config;
+
+ switch (pUsbConf->InParam)
+ {
+ case NvRmAnalogUsbInputParam_CheckCableStatus:
+ if (inst == 0)
+ {
+ pUsbConf->UsbCableDetected = NvRmPrivUsbfIsCableConnected(hDevice);
+ // At this time we don't know charger is connected or not
+ pUsbConf->UsbChargerDetected = NV_FALSE;
+ }
+ break;
+ case NvRmAnalogUsbInputParam_WaitForPhyClock:
+ if (inst == 0)
+ {
+ // Wait for PHY clock to settle
+ error = NvRmPrivUsbfWaitForPhyClock(hDevice, Enable);
+ }
+ if( inst == 2)
+ {
+ // wait for USB3 phy clock is settle
+ /**Wait until PHY clock comes up by checking for USB_PHY_CLK_VALID bit in
+ USB3_IF_USB_SUSP_CTRL register **/
+
+ do {
+ //Wait for the phy clock to become valid
+ RegVal = NV_REGR(hDevice, NvRmModuleID_Usb2Otg, 2, USB3_IF_USB_SUSP_CTRL_0);
+
+ PhyClkValid = NV_DRF_VAL(USB3_IF, USB_SUSP_CTRL, USB_PHY_CLK_VALID, RegVal);
+
+ if (!TimeOut)
+ {
+ break;
+ }
+ NvOsWaitUS(1);
+ TimeOut--;
+ } while (!PhyClkValid);
+ }
+
+ break;
+ case NvRmAnalogUsbInputParam_CheckChargerStatus:
+ if (inst == 0)
+ {
+ // Check whether the Dumb charger is detected
+ pUsbConf->UsbChargerDetected = NvRmPrivUsbfIsChargerDetected(hDevice, Enable);
+ }
+ break;
+ case NvRmAnalogUsbInputParam_ChargerDetection:
+ if (inst == 0)
+ {
+ // Enable Charger detection logic
+ NvRmPrivUsbfChargerDetection(hDevice, Enable);
+ }
+ break;
+ case NvRmAnalogUsbInputParam_ConfigureUsbPhy:
+ if (inst == 1) // for ULPI
+ {
+ NvRmUsbPrivConfigureUsbPhy(hDevice, Enable);
+ }
+ else
+ {
+ // UTMIP settings
+ if (Enable)
+ {
+ if ((inst == 0) && (s_IsUsb0PhyConfigured == NV_TRUE))
+ {
+ // If Usb0 Phy is already configure; nothing to do
+ return NvSuccess;
+ }
+ else if((inst == 2) && (s_IsUsb2PhyConfigured == NV_TRUE))
+ {
+ // If Usb2 Phy is already configure; nothing to do
+ return NvSuccess;
+ }
+ /** For AP20 instance 2 is utmip and we need to select this
+ interface **/
+ pUsbProperty = NvOdmQueryGetUsbProperty(NvOdmIoModule_Usb, inst);
+ if ((inst == 2) && (pUsbProperty->UsbInterfaceType ==
+ NvOdmUsbInterfaceType_Utmi))
+ {
+ // Select UTMIP3 incase of usb3 and UTMIP
+ error = NvRmPrivUsb3ConfigureUtmipPhy(hDevice);
+ if (error != NvSuccess)
+ {
+ return error;
+ }
+ s_IsUsb2PhyConfigured = NV_TRUE;
+ }
+ else
+ {
+ // Configure USB1 UTMIP Phy
+ error = NvRmPrivUsbfConfigureUtmipPhy(hDevice);
+ if (error != NvSuccess)
+ {
+ return error;
+ }
+ // Enable USB circuitry
+ NvRmPrivUsbfPowerControl(hDevice, inst, Enable);
+ s_IsUsb0PhyConfigured = NV_TRUE;
+ }
+ }
+ else
+ {
+ if (inst == 0) // for UTMIP1
+ {
+ // Disable power to the USB phy
+ NvRmPrivUsbfPowerControl(hDevice, inst, Enable);
+ // Enable VBUS interrupt when USB controller is OFF
+ NvRmPrivUsbfEnableVbusInterrupt(hDevice);
+ }
+ if (inst == 0)
+ s_IsUsb0PhyConfigured = NV_FALSE;
+ else if (inst == 2)
+ s_IsUsb2PhyConfigured = NV_FALSE;
+ }
+ }
+ break;
+ case NvRmAnalogUsbInputParam_SetUlpiNullTrimmers:
+ if (inst == 1)
+ {
+ NvRmPrivUsbfSetUlpiNullTrimmers(hDevice, inst);
+ }
+ break;
+ case NvRmAnalogUsbInputParam_SetUlpiLinkTrimmers:
+ if (inst == 1)
+ {
+ NvRmPrivUsbfSetUlpiLinkTrimmers(hDevice, inst);
+ }
+ break;
+ case NvRmAnalogUsbInputParam_VbusInterrupt:
+ if (inst == 0)
+ {
+ if (Enable)
+ {
+ // enable VBus Interrupt
+ NvRmPrivUsbfEnableVbusInterrupt(hDevice);
+ }
+ else
+ {
+ // disable VBUS interrupt
+ NvRmPrivUsbfDisableVbusInterrupt(hDevice);
+ }
+ }
+ break;
+ case NvRmAnalogUsbInputParam_ConfigureUlpiNullClock:
+ break;
+ case NvRmAnalogUsbInputParam_SetNullUlpiPinMux:
+ if(inst == 1)
+ {
+ NvRmSetModuleTristate(hDevice,
+ NVRM_MODULE_ID(NvRmModuleID_Usb2Otg, inst),
+ NV_FALSE);
+
+ // enb_usb2_clocks
+ NvRmUsbPrivEnableUsb2Clock(hDevice);
+ NvRmPrivUsbfUlpiClockControl(hDevice, inst, Enable);
+ }
+ break;
+ case NvRmAnalogUsbInputParam_IdInterrupt:
+ if (inst == 0)
+ {
+ if (Enable)
+ {
+ // enable ID Interrupt
+ NvRmPrivUsbfEnableIdInterrupt(hDevice);
+ }
+ else
+ {
+ // disable ID interrupt
+ NvRmPrivUsbfDisableIdInterrupt(hDevice);
+ }
+ }
+ break;
+ case NvRmAnalogUsbInputParam_CheckIdStatus:
+ if (inst == 0)
+ {
+ pUsbConf->UsbIdDetected = NvRmPrivUsbfIsIdSetToLow(hDevice);
+ }
+ break;
+ default:
+ NV_ASSERT(NV_FALSE);
+ break;
+ }
+
+ return error;
+}
+
+
+NvError
+NvRmAnalogInterfaceControl(
+ NvRmDeviceHandle hDevice,
+ NvRmAnalogInterface Interface,
+ NvBool Enable,
+ void *Config,
+ NvU32 ConfigLength )
+{
+ NvError err = NvSuccess;
+ NvU32 id;
+ NvU32 inst;
+
+ NV_ASSERT( hDevice );
+
+ id = NVRM_ANALOG_INTERFACE_ID( Interface );
+ inst = NVRM_ANALOG_INTERFACE_INSTANCE( Interface );
+
+ NvOsMutexLock( hDevice->mutex );
+
+ switch( id ) {
+ case NvRmAnalogInterface_Dsi:
+ break;
+ case NvRmAnalogInterface_ExternalMemory:
+ break;
+ case NvRmAnalogInterface_Hdmi:
+ break;
+ case NvRmAnalogInterface_Lcd:
+ break;
+ case NvRmAnalogInterface_Uart:
+ break;
+ case NvRmAnalogInterface_Usb:
+ err = NvRmPrivUsbfControl( hDevice, Enable, inst, Config,
+ ConfigLength );
+ break;
+ case NvRmAnalogInterface_Sdio:
+ break;
+ case NvRmAnalogInterface_Tv:
+ err = NvRmPrivTvDcControl( hDevice, Enable, inst, Config,
+ ConfigLength );
+ break;
+ case NvRmAnalogInterface_VideoInput:
+ err = NvRmPrivVideoInputControl( hDevice, Enable, inst, Config,
+ ConfigLength);
+ break;
+ default:
+ NV_ASSERT(!"Unknown Analog interface passed. ");
+ }
+
+ NvOsMutexUnlock( hDevice->mutex );
+
+ return err;
+}
+
+NvBool
+NvRmUsbIsConnected(
+ NvRmDeviceHandle hDevice)
+{
+ //Do nothing
+ return NV_TRUE;
+}
+
+NvU32
+NvRmUsbDetectChargerState(
+ NvRmDeviceHandle hDevice,
+ NvU32 wait)
+{
+ //Do nothing
+ return NvOdmUsbChargerType_UsbHost;
+}
+
+NvU8
+NvRmAnalogGetTvDacConfiguration(
+ NvRmDeviceHandle hDevice,
+ NvRmAnalogTvDacType Type)
+{
+ NvU8 RetVal = 0;
+ NvU32 OldRegVal = 0;
+ NvU32 NewRegVal = 0;
+
+ NV_ASSERT(hDevice);
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Enable fuse clock
+ NvRmPowerModuleClockControl(hDevice, NvRmModuleID_Fuse, 0, NV_TRUE);
+#endif
+
+ // Enable fuse values to be visible before reading the fuses.
+ OldRegVal = NV_REGR(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0);
+ NewRegVal = NV_FLD_SET_DRF_NUM(CLK_RST_CONTROLLER, MISC_CLK_ENB,
+ CFG_ALL_VISIBLE, 1, OldRegVal);
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, NewRegVal);
+
+ switch (Type)
+ {
+ case NvRmAnalogTvDacType_CRT:
+ RetVal = NV_REGR(hDevice, NvRmModuleID_Fuse, 0, FUSE_DAC_CRT_CALIB_0);
+ break;
+ case NvRmAnalogTvDacType_SDTV:
+ RetVal = NV_REGR(hDevice, NvRmModuleID_Fuse, 0, FUSE_DAC_SDTV_CALIB_0);
+ break;
+ case NvRmAnalogTvDacType_HDTV:
+ RetVal = NV_REGR(hDevice, NvRmModuleID_Fuse, 0, FUSE_DAC_HDTV_CALIB_0);
+ break;
+ default:
+ NV_ASSERT(!"Unsupported this Dac type");
+ break;
+ }
+
+ // Disable fuse values visibility
+ NV_REGW(hDevice, NvRmPrivModuleID_ClockAndReset, 0,
+ CLK_RST_CONTROLLER_MISC_CLK_ENB_0, OldRegVal);
+
+#if NV_USE_FUSE_CLOCK_ENABLE
+ // Disable fuse clock
+ NvRmPowerModuleClockControl(hDevice, NvRmModuleID_Fuse, 0, NV_FALSE);
+#endif
+
+ return RetVal;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_hw_private.c
new file mode 100644
index 000000000000..d42c02701558
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_hw_private.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * DMA Resource manager private API for Hw access </b>
+ *
+ * @b Description: Implements the private interface of the nvrm dma to access
+ * the hw apb/ahb dma registers.
+ *
+ * This files implements the API for accessing the register of the Dma
+ * controller and configure the dma transfers for Ap15.
+ */
+
+#include "nvrm_dma.h"
+#include "rm_dma_hw_private.h"
+#include "ap20/arapbdma.h"
+#include "ap20/arapbdmachan.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+#include "nvrm_hardware_access.h"
+
// Read a 32-bit APB DMA channel register. The register offset macro
// APBDMACHAN_CHANNEL_0_<reg>_0 is a byte offset, hence the /4 to turn it
// into a word index (pVirtBaseAdd is treated as a word pointer).
#define APBDMACHAN_READ32(pVirtBaseAdd, reg) \
        NV_READ32((pVirtBaseAdd) + ((APBDMACHAN_CHANNEL_0_##reg##_0)/4))
// Write a 32-bit APB DMA channel register; same byte-offset-to-word-index
// conversion as above. The do/while(0) wrapper makes the macro safe to use
// as a single statement (e.g. in an unbraced if/else).
#define APBDMACHAN_WRITE32(pVirtBaseAdd, reg, val) \
    do { \
        NV_WRITE32(((pVirtBaseAdd) + ((APBDMACHAN_CHANNEL_0_##reg##_0)/4)), (val)); \
    } while(0)
+
+
+static void
+ConfigureDmaRequestor(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmDmaModuleID DmaReqModuleId,
+ NvU32 DmaReqInstId)
+{
+ // Check for the dma module Id and based on the dma module Id, decide
+ // the trigger requestor source.
+ switch (DmaReqModuleId)
+ {
+ /// Specifies the dma module Id for memory
+ case NvRmDmaModuleID_Memory:
+ // Dma transfer will be from memory to memory.
+ // Use the reset value only for the ahb data transfer.
+ break;
+
+
+ case NvRmDmaModuleID_I2s:
+ // Dma requestor is the I2s controller.
+ NV_ASSERT(DmaReqInstId < 2);
+ if (DmaReqInstId == 0)
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, I2S_1, pDmaChRegs->ControlReg);
+ else
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, I2S2_1, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Uart:
+ // Dma requestor is the uart.
+ NV_ASSERT(DmaReqInstId < 5);
+ switch (DmaReqInstId)
+ {
+ default:
+ case 0:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_A, pDmaChRegs->ControlReg);
+ break;
+ case 1:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_B, pDmaChRegs->ControlReg);
+ break;
+ case 2:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_C, pDmaChRegs->ControlReg);
+ break;
+ case 3:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_D, pDmaChRegs->ControlReg);
+ break;
+ case 4:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_E, pDmaChRegs->ControlReg);
+ break;
+ }
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_8,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Vfir:
+ // Dma requestor is the vfir.
+ NV_ASSERT(DmaReqInstId < 1);
+ if (DmaReqInstId == 1)
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, UART_B, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Mipi:
+ // Dma requestor is the Mipi controller.
+ NV_ASSERT(DmaReqInstId < 1);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, MIPI, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Spi:
+ // Dma requestor is the Spi controller.
+ NV_ASSERT(DmaReqInstId < 1);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, SPI, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
+ CSR, TRIG_SEL, 0, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Slink:
+ // Dma requestor is the Slink controller.
+ NV_ASSERT(DmaReqInstId < 3);
+ if (DmaReqInstId == 0)
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, SL2B1, pDmaChRegs->ControlReg);
+ else if (DmaReqInstId == 1)
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, SL2B2, pDmaChRegs->ControlReg);
+ else
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, SL2B3, pDmaChRegs->ControlReg);
+
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
+ CSR, TRIG_SEL, 0, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Spdif:
+ // Dma requestor is the Spdif controller.
+ NV_ASSERT(DmaReqInstId < 1);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, SPD_I, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_I2c:
+ // Dma requestor is the I2c controller.
+ NV_ASSERT(DmaReqInstId < 3);
+ switch (DmaReqInstId)
+ {
+ default:
+ case 0:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, I2C,
+ pDmaChRegs->ControlReg);
+ break;
+ case 1:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, I2C2,
+ pDmaChRegs->ControlReg);
+ break;
+ case 2:
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, I2C3,
+ pDmaChRegs->ControlReg);
+ break;
+ }
+
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Dvc:
+ // Dma requestor is the I2c controller.
+ NV_ASSERT(DmaReqInstId < 1);
+
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, DVC_I2C, pDmaChRegs->ControlReg);
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, FLOW, ENABLE, pDmaChRegs->ControlReg);
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_BUS_WIDTH, BUS_WIDTH_32,
+ pDmaChRegs->ApbSequenceReg);
+ break;
+
+
+ default:
+ NV_ASSERT(!"Invalid module");
+ }
+}
+
+/**
+ * Configure the Apb dma register as per clients information.
+ * This function do the register setting based on device Id and will be stored
+ * in the dma handle. This information will be used when there is dma transfer
+ * request and want to configure the dma controller registers.
+ */
+static void
+InitApbDmaRegisters(
+    DmaChanRegisters *pDmaChRegs,
+    NvRmDmaModuleID DmaReqModuleId,
+    NvU32 DmaReqInstId)
+{
+    // No hardware channel aperture is bound yet; presumably assigned later
+    // when a physical channel is allocated — TODO confirm against allocator.
+    pDmaChRegs->pHwDmaChanReg = NULL;
+
+    // Set the dma register of dma handle to their power on reset values.
+    // These are shadow values only; nothing is written to hardware here.
+    pDmaChRegs->ControlReg = NV_RESETVAL(APBDMACHAN_CHANNEL_0, CSR);
+    pDmaChRegs->AhbSequenceReg = NV_RESETVAL(APBDMACHAN_CHANNEL_0, AHB_SEQ);
+    pDmaChRegs->ApbSequenceReg = NV_RESETVAL(APBDMACHAN_CHANNEL_0,APB_SEQ);
+
+    // Configure the dma register for the OnceMode
+    pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+                                    ONCE, SINGLE_BLOCK, pDmaChRegs->ControlReg);
+
+    // Configure the dma register for enabling the interrupt so that it will generate the interrupt
+    // after transfer completes.
+    pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR,
+                                    IE_EOC, ENABLE, pDmaChRegs->ControlReg);
+
+    // Configure the dma register for interrupting the cpu only.
+    pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                        AHB_SEQ, INTR_ENB, CPU, pDmaChRegs->AhbSequenceReg);
+
+    // Configure the dma registers as per requestor information.
+    ConfigureDmaRequestor(pDmaChRegs, DmaReqModuleId, DmaReqInstId);
+}
+
+/**
+ * Set the data transfer mode for the dma transfer.
+ */
+static void
+SetApbDmaTransferMode(
+    DmaChanRegisters *pDmaChRegs,
+    NvBool IsContinuousMode,
+    NvBool IsDoubleBuffMode)
+{
+    // Both settings update the shadow register copies only; they are applied
+    // to hardware elsewhere (presumably on transfer start — TODO confirm).
+    // Configure the dma register for the Continuous Mode
+    if (IsContinuousMode)
+        pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                        CSR, ONCE, MULTIPLE_BLOCK, pDmaChRegs->ControlReg);
+    else
+        pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                        CSR, ONCE, SINGLE_BLOCK, pDmaChRegs->ControlReg);
+
+    // Configure the dma register for the double buffering Mode
+    if (IsDoubleBuffMode)
+        pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                            AHB_SEQ, DBL_BUF, RELOAD_FOR_2X_BLOCKS,
+                            pDmaChRegs->AhbSequenceReg);
+    else
+        pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                            AHB_SEQ, DBL_BUF, RELOAD_FOR_1X_BLOCKS,
+                            pDmaChRegs->AhbSequenceReg);
+}
+
+/**
+ * Set the Apb dma direction of data transfer.
+ */
+static void
+SetApbDmaDirection(
+    DmaChanRegisters *pDmaChRegs,
+    NvBool IsSourceAddPerType)
+{
+    // If the source is the APB peripheral, data flows APB -> AHB (memory),
+    // i.e. an AHB write; otherwise memory -> peripheral (AHB read).
+    if (IsSourceAddPerType)
+        pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                            CSR, DIR, AHB_WRITE, pDmaChRegs->ControlReg);
+    else
+        pDmaChRegs->ControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+                            CSR, DIR, AHB_READ, pDmaChRegs->ControlReg);
+}
+
+// Populate the AP15-specific entry points of the DMA hardware-interface
+// vtable. Only these three hooks are filled in here; any other fields of
+// DmaHwInterface are left untouched for the caller to initialize.
+void NvRmPrivDmaInitAp15DmaHwInterfaces(DmaHwInterface *pApbDmaInterface)
+{
+
+    pApbDmaInterface->DmaHwInitRegistersFxn = InitApbDmaRegisters;
+    pApbDmaInterface->DmaHwSetTransferModeFxn = SetApbDmaTransferMode;
+    pApbDmaInterface->DmaHwSetDirectionFxn = SetApbDmaDirection;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_intr.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_intr.c
new file mode 100644
index 000000000000..0e8761264287
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_dma_intr.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * DMA Resource manager private API for Hw access </b>
+ *
+ * @b Description: Implements the private interface of the hw access NvRM DMA.
+ * This files implements the API for accessing the register of the AP15 Dma
+ * controller.
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_processor.h"
+#include "nvrm_drf.h"
+#include "ap15/arapbdma.h"
+#include "rm_dma_hw_private.h"
+
+#define NV_APB_DMA_REGR(rm,reg) NV_REGR(rm, NvRmPrivModuleID_ApbDma, 0, APBDMA_##reg##_0)
+#define NV_APB_DMA_REGW(rm,reg,data) NV_REGW(rm, NvRmPrivModuleID_ApbDma, 0, APBDMA_##reg##_0, data)
+
+// Identify the highest-numbered APB DMA channel with a pending CPU interrupt,
+// mask (disable) that channel's interrupt, and return the channel index.
+// NOTE(review): if no status bit is set, CountLeadingZeros(0) would make
+// Channel wrap to 0xFFFFFFFF; callers presumably only invoke this with an
+// interrupt actually pending — verify.
+NvU32 NvRmPrivDmaInterruptDecode(NvRmDeviceHandle hRmDevice )
+{
+    NvU32 Channel;
+    NvU32 Reg;
+
+    // Read the APB DMA channel interrupt status register.
+    Reg = NV_APB_DMA_REGR(hRmDevice, IRQ_STA_CPU);
+
+    // Get the interrupting channel number (highest set bit).
+    Channel = 31 - CountLeadingZeros(Reg);
+
+    // Get the interrupt disable mask.
+    Reg = 1 << Channel;
+
+    // Disable the source.
+    NV_APB_DMA_REGW(hRmDevice, IRQ_MASK_CLR, Reg);
+
+    return Channel;
+}
+
+// Enable or disable the CPU interrupt for a single APB DMA channel via the
+// set/clear mask registers. No read-modify-write is needed: IRQ_MASK_SET and
+// IRQ_MASK_CLR each affect only the bits written.
+void NvRmPrivDmaInterruptEnable(NvRmDeviceHandle hRmDevice, NvU32 Channel, NvBool Enable )
+{
+    NvU32 Reg;
+
+    // Generate the channel mask.
+    Reg = 1 << Channel;
+
+    if (Enable)
+    {
+        // Enable the channel interrupt.
+        NV_APB_DMA_REGW(hRmDevice, IRQ_MASK_SET, Reg);
+    }
+    else
+    {
+        // Disable the channel interrupt.
+        NV_APB_DMA_REGW(hRmDevice, IRQ_MASK_CLR, Reg);
+    }
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.c
new file mode 100644
index 000000000000..dfad64c856c0
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "ap15/ap15rm_gpio_vi.h"
+#include "nvrm_gpio_private.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "ap15/arvi.h"
+#include "nvodm_query_discovery.h"
+#include "nvrm_pmu.h"
+
+#define NV_ENABLE_VI_POWER_RAIL 1
+
+static NvU32 s_ViRegState = 0;
+static NvU32 s_ViPowerID = 0;
+static NvU32 s_PowerClientRefCount = 0;
+static NvOdmPeripheralConnectivity const *s_pConnectivity = NULL;
+
+// Use a boolean array for easy lookup of
+// which pins are available. Initialize the useable
+// ones to TRUE. Only hazard, is one could release
+// an invalid pin, then come back and acquire it.
+// but, that would just be dumb, and it is their own
+// fault for being dumb.
+static NvBool s_AvailableViPinList[] =
+{
+ NV_TRUE, // VGP0
+ NV_TRUE, // VD10
+ NV_TRUE, // VD11
+ NV_TRUE, // VGP3
+ NV_TRUE, // VGP4
+ NV_TRUE, // VGP5
+ NV_TRUE, // VGP6
+};
+
+NvError
+NvRmPrivGpioViAcquirePinHandle(
+    NvRmDeviceHandle hRm,
+    NvU32 pinNumber)
+{
+    // Register offsets in arvi.h are word offsets; NV_REGW takes bytes.
+    NvU32 addr = VI_PIN_OUTPUT_ENABLE_0*4;
+    NvU32 data = 0;
+    NvError status;
+
+    NV_ASSERT(hRm != NULL);
+
+    if (pinNumber >= NV_ARRAY_SIZE(s_AvailableViPinList))
+    {
+        return NvError_BadValue;
+    }
+
+    // Track the VGP's that VI has
+    if (!s_AvailableViPinList[pinNumber])
+    {
+        return NvError_AlreadyAllocated;
+    }
+
+    // In order to ensure that we don't do all these Power calls more than
+    // once, refcount it. This function and it's inverse (Acquire/Release)
+    // are protected by a mutex one level up, so this refcount is safe.
+    if (s_PowerClientRefCount == 0)
+    {
+        // turn on vi clock, reset, and power
+        status = NvRmPowerRegister(hRm, NULL, &s_ViPowerID);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+
+        status = NvRmPowerVoltageControl( hRm,
+                        NvRmModuleID_Vi,
+                        s_ViPowerID,
+                        NvRmVoltsUnspecified,
+                        NvRmVoltsUnspecified,
+                        NULL, 0, NULL);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+
+        status = NvRmPowerModuleClockControl(hRm,
+                        NvRmModuleID_Vi,
+                        s_ViPowerID,
+                        NV_TRUE);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+
+        status = NvRmPowerModuleClockConfig(hRm,
+                        NvRmModuleID_Vi,
+                        s_ViPowerID,
+                        NvRmFreqUnspecified,
+                        NvRmFreqUnspecified,
+                        NULL, 0, NULL,
+                        NvRmClockConfig_ExternalClockForPads |
+                        NvRmClockConfig_InternalClockForCore);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+
+#if NV_ENABLE_VI_POWER_RAIL
+        status = NvRmPrivGpioViPowerRailConfig(hRm, NV_TRUE);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+#endif
+        status = NvRmSetModuleTristate(hRm, NvRmModuleID_Vi, NV_FALSE);
+        if (status != NvSuccess)
+            goto power_stuff_failed;
+    }
+
+    s_PowerClientRefCount++;
+
+    // We will just go ahead and enable all the output pins
+    // that can be used.
+    // (Done unconditionally on every acquire, not just the first.)
+    #define ENABLE_PIN(_name_) \
+        (VI_PIN_OUTPUT_ENABLE_0_##_name_##_OUTPUT_ENABLE_SHIFT)
+    data |= 1 << ENABLE_PIN(VGP6);
+    data |= 1 << ENABLE_PIN(VGP5);
+    data |= 1 << ENABLE_PIN(VGP4);
+    data |= 1 << ENABLE_PIN(VGP3);
+    data |= 1 << ENABLE_PIN(VD11);
+    data |= 1 << ENABLE_PIN(VD10);
+    data |= 1 << ENABLE_PIN(VGP0);
+    #undef ENABLE_PIN
+    NV_REGW(hRm, NvRmModuleID_Vi, 0, addr, data);
+
+    s_AvailableViPinList[pinNumber] = NV_FALSE;
+    return NvSuccess;
+
+power_stuff_failed:
+
+    // TODO: robustly handle if a few NvRmPower (etc) calls worked before we
+    // hit a failure.  Possibly need to undo each call that succeeded in
+    // reverse order?
+
+    if (s_ViPowerID)
+    {
+        NvRmPowerUnRegister(hRm, s_ViPowerID);
+        s_ViPowerID = 0;
+    }
+
+    return status;
+}
+
+// Return a VI pin to the available pool and, when the last pin is released,
+// power down the VI block (rail, clock, voltage, tristate) in reverse order
+// of acquisition. Caller holds the same mutex that protects Acquire.
+// NOTE(review): pin is not range-checked here (unlike Acquire); also 'status'
+// is only consumed by NV_ASSERT, so release builds may warn it is set but
+// unused.
+void NvRmPrivGpioViReleasePinHandles(
+    NvRmDeviceHandle hRm,
+    NvU32 pin)
+{
+    NvError status;
+
+    // if already available, return
+    if (s_AvailableViPinList[pin])
+        return;
+
+    // release the pin
+    s_AvailableViPinList[pin] = NV_TRUE;
+
+    s_PowerClientRefCount--;
+
+    if (s_PowerClientRefCount == 0)
+    {
+        // turn off vi clock, reset, and power
+        NV_ASSERT(s_ViPowerID);
+#if NV_ENABLE_VI_POWER_RAIL
+        /* Disable power rail */
+        status = NvRmPrivGpioViPowerRailConfig(hRm, NV_FALSE);
+        NV_ASSERT((status == NvSuccess) && "PowerRailConfig failed");
+#endif
+        /* Power down vi block */
+        // Disable module clock
+        status = NvRmPowerModuleClockControl(hRm,
+                        NvRmModuleID_Vi,
+                        s_ViPowerID,
+                        NV_FALSE);
+        NV_ASSERT((status == NvSuccess) && "PowerModuleClockControl failed");
+
+        // Disable module power
+        status = NvRmPowerVoltageControl(hRm,
+                        NvRmModuleID_Vi,
+                        s_ViPowerID,
+                        NvRmVoltsOff,
+                        NvRmVoltsOff,
+                        NULL, 0, NULL);
+        NV_ASSERT((status == NvSuccess) && "PowerVoltageControl failed");
+
+        // Unregister itself as power client
+        NvRmPowerUnRegister(hRm, s_ViPowerID);
+        s_ViPowerID = 0;
+
+        status = NvRmSetModuleTristate(hRm, NvRmModuleID_Vi, NV_TRUE);
+        NV_ASSERT((status == NvSuccess) && "SetModuleTrisate failed");
+    }
+    return;
+}
+
+// Map a logical VI pin index (0..6, ordered as in s_AvailableViPinList:
+// VGP0, VD10, VD11, VGP3..VGP6) to its bit shift in VI_PIN_OUTPUT_DATA_0.
+// Returns 0xFFFFFFFF for an out-of-range pin.
+// NOTE(review): pins 3..6 are computed as VGP0 shift + pin, which assumes the
+// VGP0..VGP6 data bits are contiguous in the register — confirm against arvi.h.
+static NvU32 TranslatePinToViRegShift(NvU32 pin)
+{
+    NvU32 shift;
+    if ((pin == 1) || (pin == 2)) // mapped to VD10 and VD11
+    {
+        shift = (pin-1) + VI_PIN_OUTPUT_DATA_0_VD10_OUTPUT_DATA_SHIFT;
+    }
+    else if (pin <= 6) // only VGP0 to VGP6 exist
+    {
+        shift = pin + VI_PIN_OUTPUT_DATA_0_VGP0_OUTPUT_DATA_SHIFT;
+    }
+    else
+    {
+        shift = 0xFFFFFFFF; // illegal pin choice
+    }
+    return shift;
+}
+
+// Return the last value written to the given VI pin (0 or 1). This reads the
+// software shadow (s_ViRegState), not the hardware register, because the VI
+// gpio is never configured for input (could conflict with sensor pins).
+// Returns 0 for an illegal pin.
+NvU32 NvRmPrivGpioViReadPins(
+    NvRmDeviceHandle hRm,
+    NvU32 pin )
+{
+    NvU32 shift = TranslatePinToViRegShift(pin);
+    // just return the shadowed value for now,
+    // since we aren't going to configure the vi gpio for input
+    // as it could potentially conflict with the sensor pins
+    if (shift == 0xFFFFFFFF)
+    {
+        return 0; // illegal pin choice
+    }
+    else
+    {
+        return (s_ViRegState >> shift) & 0x1;
+    }
+}
+
+// Drive a VI pin high or low: update the software shadow of the output-data
+// register and write the whole shadow back to hardware. Silently ignores an
+// illegal pin.
+void NvRmPrivGpioViWritePins(
+    NvRmDeviceHandle hRm,
+    NvU32 pin,
+    NvU32 pinState )
+{
+    // Word offset -> byte offset for NV_REGW.
+    NvU32 addr = VI_PIN_OUTPUT_DATA_0*4;
+    NvU32 shift = TranslatePinToViRegShift(pin);
+
+    if (shift == 0xFFFFFFFF)
+    {
+        return; // illegal pin choice
+    }
+
+    s_ViRegState &= ~(1 << shift); // clear
+    if (pinState)
+    {
+        s_ViRegState |= 1 << shift; // set
+    }
+    // write s_ViRegState to VI
+    NV_REGW(hRm, NvRmModuleID_Vi, 0, addr, s_ViRegState);
+    return;
+}
+
+// Look up (once) the ODM peripheral connectivity entry for the VI power rail
+// and cache it in s_pConnectivity. Returns NV_TRUE if the entry is (already)
+// available, NV_FALSE if the board's ODM data has no such peripheral.
+NvBool NvRmPrivGpioViDiscover(
+    NvRmDeviceHandle hRm)
+{
+    NvU64 guid = NV_VDD_VI_ODM_ID;
+
+    // Already discovered on a previous call — cached for the process lifetime.
+    if (s_pConnectivity)
+    {
+        return NV_TRUE;
+    }
+
+    /* get the connectivity info */
+    s_pConnectivity = NvOdmPeripheralGetGuid( guid );
+    if ( !s_pConnectivity )
+    {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+// Enable or disable every Vdd rail listed in the VI peripheral's ODM
+// connectivity table. On enable, each rail is set to the PMU-reported
+// requestMilliVolts; on disable, to ODM_VOLTAGE_OFF. Waits out any settling
+// time the PMU reports after each rail change.
+NvError
+NvRmPrivGpioViPowerRailConfig(
+    NvRmDeviceHandle hRm,
+    NvBool Enable)
+{
+    NvU32 i;
+    NvRmPmuVddRailCapabilities RailCaps;
+    // Written by NvRmPmuSetVoltage before it is read below — presumably
+    // always set by that call; TODO confirm the PMU API contract.
+    NvU32 SettlingTime;
+
+    if ( !NvRmPrivGpioViDiscover(hRm) )
+    {
+        return NvError_ModuleNotPresent;
+    }
+
+    for (i = 0; i < (s_pConnectivity->NumAddress); i++)
+    {
+        // Search for the vdd rail entry
+        if (s_pConnectivity->AddressList[i].Interface == NvOdmIoModule_Vdd)
+        {
+            if (Enable)
+            {
+                NvRmPmuGetCapabilities(hRm,
+                    s_pConnectivity->AddressList[i].Address, &RailCaps);
+                NvRmPmuSetVoltage(hRm,
+                    s_pConnectivity->AddressList[i].Address,
+                    RailCaps.requestMilliVolts, &SettlingTime);
+            }
+            else
+            {
+                NvRmPmuSetVoltage(hRm,
+                    s_pConnectivity->AddressList[i].Address,
+                    ODM_VOLTAGE_OFF, &SettlingTime);
+            }
+            if (SettlingTime)
+                NvOsWaitUS(SettlingTime);
+        }
+    }
+    return NvSuccess;
+
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.h b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.h
new file mode 100644
index 000000000000..07d1c055f2b8
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_gpio_vi.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef AP15RM_GPIO_VI_H
+#define AP15RM_GPIO_VI_H
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+
+NvError
+NvRmPrivGpioViAcquirePinHandle(
+ NvRmDeviceHandle hRm,
+ NvU32 pinNumber);
+
+void NvRmPrivGpioViReleasePinHandles(
+ NvRmDeviceHandle hRm,
+ NvU32 pin);
+
+NvU32 NvRmPrivGpioViReadPins(
+ NvRmDeviceHandle hRm,
+ NvU32 pin );
+
+void NvRmPrivGpioViWritePins(
+ NvRmDeviceHandle hRm,
+ NvU32 pin,
+ NvU32 pinState );
+NvError
+NvRmPrivGpioViPowerRailConfig(
+ NvRmDeviceHandle hRm,
+ NvBool Enable);
+
+NvBool NvRmPrivGpioViDiscover(
+ NvRmDeviceHandle hRm);
+
+#endif /* AP15RM_GPIO_VI_H */
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_i2c.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_i2c.c
new file mode 100644
index 000000000000..7373390ac80b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_i2c.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: I2C API</b>
+ *
+ * @b Description: Contains the NvRM I2C implementation.
+ */
+
+#include "nvrm_i2c.h"
+#include "nvrm_i2c_private.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "ap20/ari2c.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "ap20/ardvc.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+
+
+#define I2C_PACKET_SIZE 8
+
+/* Register access Macros */
+#define I2C_REGR(c, reg) NV_REGR((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+ (c)->I2cRegisterOffset + I2C_##reg##_0 ); \
+
+#define I2C_REGW(c, reg, val) NV_REGW((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+ ((c)->I2cRegisterOffset + I2C_##reg##_0), (val) );
+
+#define DVC_REGR(c, reg) NV_REGR((c)->hRmDevice, NvRmModuleID_Dvc, (c)->Instance, DVC_##reg##_0)
+#define DVC_REGW(c, reg, val) NV_REGW((c)->hRmDevice, NvRmModuleID_Dvc, (c)->Instance, DVC_##reg##_0, val )
+
+// I2C controller interrupt handler. Decodes the transfer outcome from
+// I2C_STATUS into c->I2cTransferStatus, wakes the waiting thread via
+// I2cSyncSemaphore, and acknowledges the interrupt with NvRmInterruptDone.
+static void I2cIsr(void* args)
+{
+    NvRmI2cController* c = args;
+    NvU32 status_register;
+    NvU32 FailedByte;
+
+    // Read the status register
+    status_register = I2C_REGR(c, I2C_STATUS);
+
+    // Non-zero status means some byte failed (or the controller is still
+    // busy, which should never coincide with an interrupt).
+    if (status_register)
+    {
+        FailedByte = (NV_DRF_VAL(I2C, I2C_STATUS, CMD1_STAT, status_register) +
+                NV_DRF_VAL(I2C, I2C_STATUS, CMD2_STAT, status_register));
+        if (FailedByte == 0)
+        {
+            NV_ASSERT(!"Something wrong with the controller, got interrupt when the controller is busy");
+        }
+        if (FailedByte == 1)
+        {
+            /* If the first byte is failed then, it means there is no ACK on the
+             * address phase.i.e there is no device with that address */
+            c->I2cTransferStatus = NvError_I2cDeviceNotFound;
+        }
+        else
+        {
+            /* It failed on some subsequent bytes, just report the transcation
+             * as failed */
+            if (c->TransactionType == I2C_READ)
+            {
+                c->I2cTransferStatus = NvError_I2cReadFailed;
+            }
+            else
+            {
+                c->I2cTransferStatus = NvError_I2cWriteFailed;
+            }
+        }
+        NvOsSemaphoreSignal(c->I2cSyncSemaphore);
+        NvRmInterruptDone(c->I2CInterruptHandle);
+        return;
+    }
+
+    // Zero status: transfer completed successfully.
+    c->I2cTransferStatus = NvSuccess;
+    NvOsSemaphoreSignal(c->I2cSyncSemaphore);
+
+    NvRmInterruptDone(c->I2CInterruptHandle);
+}
+
+// DVC (PMU) I2C interrupt handler: clears the DVC-level interrupt latch
+// first, then delegates all status decoding to the common I2cIsr.
+static void DvcIsr(void* args)
+{
+    NvRmI2cController* c = args;
+
+    // The DVC module interrupt is not cleared until the DVC_STATUS_REG0 register
+    // is written
+    DVC_REGW(c, STATUS_REG, NV_DRF_NUM(DVC, STATUS_REG, I2C_DONE_INTR, 1));
+    I2cIsr(args);
+}
+
+// Polling fallback for environments without interrupt support (no sync
+// semaphore). Busy-waits for the controller BUSY bit to clear, then decodes
+// the transfer outcome into c->I2cTransferStatus exactly as I2cIsr would.
+// Each outer iteration is ~1 ms (20 x 50 us), so total wait is roughly
+// c->timeout milliseconds.
+static void
+NvRmPrivI2cOalPoll(
+    NvRmI2cController *c)
+{
+    NvU32 busy = 1;
+    NvU32 status_register = 0;
+    NvU32 FailedByte;
+    NvU32 count;
+    NvU32 timeout = c->timeout;
+
+    /* Assume success as a default condition */
+    c->I2cTransferStatus = NvSuccess;
+
+    do
+    {
+        count = 20;
+        while (count)
+        {
+            /* Assume a best case transfer of 400KHz I2C clock and 2 byte transfer:
+             * (i.e 1 address byte and 1 data byte )
+             * It should complete in around 50 micro sec */
+            NvOsWaitUS(50);
+            status_register = I2C_REGR(c, I2C_STATUS);
+            busy = NV_DRF_VAL(I2C, I2C_STATUS, BUSY, status_register);
+            if (busy == 0)
+            {
+                goto done_polling;
+            }
+            count -= 1;
+        }
+        /* Above loop takes around 1 msec */
+    } while (timeout-- );
+
+done_polling:
+
+    if (busy)
+    {
+        /* Something bad happened, controller cannot complete the transaction in
+         * the time specified. */
+        c->I2cTransferStatus = NvError_Timeout;
+    } else
+    {
+        // DVC transfers additionally require clearing the DVC interrupt latch.
+        if (c->ModuleId == NvRmModuleID_Dvc)
+        {
+            DVC_REGW(c, STATUS_REG, NV_DRF_NUM(DVC, STATUS_REG, I2C_DONE_INTR, 1));
+        }
+
+        /* Transfer completed, check the status */
+        FailedByte = NV_DRF_VAL(I2C, I2C_STATUS, CMD1_STAT, status_register) +
+            NV_DRF_VAL(I2C, I2C_STATUS, CMD2_STAT, status_register);
+
+        if (FailedByte != 0)
+        {
+            if (FailedByte == 1)
+            {
+                /* If the first byte is failed then, it means there is no ACK on the
+                 * address phase.i.e there is no device with that address */
+                c->I2cTransferStatus = NvError_I2cDeviceNotFound;
+            }
+            else
+            {
+                /* It failed on some subsequent bytes, just report the transcation
+                 * as failed */
+                if (c->TransactionType == I2C_READ)
+                {
+                    c->I2cTransferStatus = NvError_I2cReadFailed;
+                } else
+                {
+                    c->I2cTransferStatus = NvError_I2cWriteFailed;
+                }
+            }
+        }
+    }
+    return;
+}
+
+// Translate (controller instance, pin-mux configuration) into the GPIO
+// port/pin pair carrying SCL and SDA, for the software (bit-banged) I2C
+// fallback. Output encoding: pin number in the low 16 bits, zero-based port
+// index ('a' == 0) in the upper 16 bits. Returns NV_FALSE for combinations
+// with no GPIO-capable pins.
+static NvBool AP15RmI2cGetGpioPins(
+    NvRmI2cController *c,
+    NvU32 I2cPinMap,
+    NvU32 *Scl,
+    NvU32 *Sda)
+{
+    NvU32 SclPin = 0;
+    NvU32 SdaPin = 0;
+    NvU32 SclPort = 0;
+    NvU32 SdaPort = 0;
+    NvBool Result = NV_TRUE;
+
+    NV_ASSERT((Scl != NULL) && (Sda != NULL));
+
+    // FIXME: All of this should be moved over to the pin mux module,
+    // rather than the I2C module.
+    // Switch key packs instance in the high nibble and pin map in the low.
+    if (c->ModuleId == NvRmModuleID_I2c)
+    {
+        switch ((c->Instance<<4) | I2cPinMap)
+        {
+        case ((0<<4) | 1):
+            SclPin = 4;
+            SdaPin = 5;
+            SclPort = 'c' - 'a';
+            SdaPort = 'c' - 'a';
+            break;
+        case ((0<<4) | 2):
+            SclPin = 5;
+            SdaPin = 6;
+            SclPort = 'k' - 'a';
+            SdaPort = 'k' - 'a';
+            break;
+        case ((0<<4) | 3):
+            SclPin = 2;
+            SdaPin = 3;
+            SclPort = 'w' - 'a';
+            SdaPort = 'w' - 'a';
+            break;
+        /* NOTE: The pins used in Pin Map 1 do not have a GPIO controller
+         * connected to them (VGP pins), so the software I2C implementation
+         * is not supported for this pin mux configuration */
+        case ((1<<4) | 2):
+            SclPin = 5;
+            SdaPin = 6;
+            SclPort = 't' - 'a';
+            SdaPort = 't' - 'a';
+            break;
+        case ((1<<4) | 3):
+            SclPin = 7;
+            SdaPin = 1;
+            SclPort = 'v' - 'a';
+            SdaPort = 'w' - 'a';
+            break;
+        case ((1<<4) | 4):
+            SclPin = 5;
+            SdaPin = 4;
+            SclPort = 'm' - 'a';
+            SdaPort = 'm' - 'a';
+            break;
+        default:
+            Result = NV_FALSE;
+            break;
+        }
+    }
+    else if (c->ModuleId == NvRmModuleID_Dvc &&
+             c->Instance == 0 &&
+             I2cPinMap == NvOdmI2cPmuPinMap_Config1)
+    {
+        SclPin = 6;
+        SdaPin = 7;
+        SclPort = 'q' - 'a';
+        SdaPort = 'q' - 'a';
+    }
+    else
+        Result = NV_FALSE;
+
+    // Outputs are written even on failure (as zeros with Result == NV_FALSE).
+    *Scl = SclPin | (SclPort << 16);
+    *Sda = SdaPin | (SdaPort << 16);
+
+    return Result;
+}
+
+
+// Tear down the AP15 I2C controller context: unregister the interrupt and
+// destroy the sync semaphore (if interrupt mode was in use), then null out
+// the controller's function-pointer hooks so no further calls dispatch here.
+static void AP15RmI2cClose(NvRmI2cController *c)
+{
+    // Semaphore presence doubles as "interrupt mode was initialized".
+    if (c->I2cSyncSemaphore)
+    {
+        NvRmInterruptUnregister(c->hRmDevice, c->I2CInterruptHandle);
+        NvOsSemaphoreDestroy(c->I2cSyncSemaphore);
+        c->I2cSyncSemaphore = NULL;
+        c->I2CInterruptHandle = NULL;
+    }
+    c->receive = 0;
+    c->send = 0;
+    c->repeatStart = 0;
+    c->close = 0;
+    c->GetGpioPins = 0;
+}
+
+/**
+ * Perform a single I2C read of up to I2C_PACKET_SIZE (8) bytes using the
+ * controller's CMD1 path, then copy the result out of the two data FIFOs.
+ * Waits either on the ISR semaphore (interrupt mode) or by polling.
+ * Returns the transfer status; on timeout the controller module is reset.
+ * NOTE(review): only min(NumBytes, 8) bytes are transferred per call —
+ * callers presumably loop for larger reads; also *pBytesTransferred is set
+ * to ByteCount even on the timeout path, where nothing was received.
+ */
+static NvError
+AP15RmI2cReceive(
+    NvRmI2cController* c,
+    NvU8*  pBuffer,
+    const NvRmI2cTransactionInfo *pTransaction,
+    NvU32* pBytesTransferred)
+{
+    NvU32 val = 0;
+    NvU32 ByteCount;
+    NvU32 fifo[2];
+
+    NV_ASSERT(pBuffer);
+    NV_ASSERT(pTransaction->NumBytes > 0);
+
+
+    // If requested i2c is dvc i2c, then disable dvc hardware from using the dvc i2c bus.
+    if (c->ModuleId == NvRmModuleID_Dvc)
+    {
+        val = DVC_REGR(c, CTRL_REG3);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_HW_SW_PROG, SW, val);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_DONE_INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG3, val);
+
+        val = DVC_REGR(c, CTRL_REG1);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG1, INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG1, val);
+    }
+
+    val = 0;
+
+    if (c->EnableNewMaster)
+    {
+        // Enable new master if it is available
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NEW_MASTER_FSM, ENABLE);
+    }
+
+    /* 7 bit address */
+    if (c->Is10BitAddress == NV_FALSE)
+    {
+        /* write the slave address; low bit set marks a read transaction */
+        I2C_REGW(c, I2C_CMD_ADDR0, (pTransaction->Address | 1));
+
+        // Configure for read trasaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, ENABLE);
+        // Configure the slave address type as 7bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD,
+                SEVEN_BIT_DEVICE_ADDRESS);
+    }
+    /* 10 bit address */
+    else
+    {
+        /* write the slave address */
+        I2C_REGW(c, I2C_CMD_ADDR0, pTransaction->Address);
+
+        // Configure for read trasaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, ENABLE);
+        // Configure the slave address type as 10bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD,
+                TEN_BIT_DEVICE_ADDRESS);
+    }
+
+    if (c->NoACK)
+    {
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NOACK, ENABLE);
+    }
+
+    // Calculate the number of bytes that can be read
+    ByteCount = (pTransaction->NumBytes > I2C_PACKET_SIZE) ?
+                                I2C_PACKET_SIZE : pTransaction->NumBytes;
+
+    // Initialize the I2C param structure
+    c->TransactionType = I2C_READ;
+    // Pessimistic default; overwritten by the ISR or the poll loop.
+    c->I2cTransferStatus = NvError_Timeout;
+
+    // Configure the number of bytes to be read
+    val |= NV_DRF_NUM(I2C, I2C_CNFG, LENGTH, ByteCount - 1);
+    // disable repeated start
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SLV2, DISABLE);
+    I2C_REGW(c, I2C_CNFG, val);
+
+    // Start the transaction
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SEND, GO);
+    I2C_REGW(c, I2C_CNFG, val);
+
+    // No semaphore means interrupt mode is unavailable: poll instead.
+    if (!c->I2cSyncSemaphore)
+    {
+        NvRmPrivI2cOalPoll(c);
+    } else
+    {
+        NvOsSemaphoreWaitTimeout(c->I2cSyncSemaphore, c->timeout);
+    }
+
+    /* Controller should return some sort of error. If not, then there is
+     * something gross happened. */
+    if (c->I2cTransferStatus != NvError_Timeout)
+    {
+        if (c->I2cTransferStatus == NvSuccess)
+        {
+            /* Read the FIFO (two 32-bit data registers hold up to 8 bytes) */
+            fifo[0] = I2C_REGR(c, I2C_CMD_DATA1);
+            fifo[1] = I2C_REGR(c, I2C_CMD_DATA2);
+
+            NvOsMemcpy(pBuffer, (NvU8* )fifo, ByteCount);
+        }
+        if (pBytesTransferred != NULL)
+        {
+            *pBytesTransferred = ByteCount;
+        }
+    }
+    else
+    {
+        if (pBytesTransferred != NULL)
+        {
+            *pBytesTransferred = ByteCount;
+        }
+        // In case of timeout, reset the I2C controller
+        NvRmModuleReset(c->hRmDevice, NVRM_MODULE_ID(c->ModuleId, c->Instance));
+    }
+
+    return c->I2cTransferStatus;
+}
+
+/**
+ * Performs a two-transaction repeat-start transfer (CMD1 then CMD2 with no
+ * intervening STOP).  Exactly two entries of Transactions are used, each at
+ * most 4 bytes (one controller data register per command); pBuffer holds the
+ * CMD1 payload immediately followed by the CMD2 payload.  Returns the
+ * transfer status; on timeout the controller is reset.
+ */
+static NvError
+AP15RmI2cRepeatStartTransaction(
+    NvRmI2cController *c,
+    NvU8* pBuffer,
+    NvRmI2cTransactionInfo * Transactions,
+    NvU32 NoOfTransations)
+{
+    NvU32 val = 0;
+    NvU32 data = 0;
+    NvU8 *pBuffer1, *pBuffer2;
+
+    NV_ASSERT(pBuffer);
+    NV_ASSERT(Transactions);
+    NV_ASSERT(Transactions[0].NumBytes <= 4);
+    NV_ASSERT(Transactions[1].NumBytes <= 4);
+
+    // If requested i2c is dvc i2c, then disable dvc hardware from using the dvc i2c bus.
+    if (c->ModuleId == NvRmModuleID_Dvc)
+    {
+        val = DVC_REGR(c, CTRL_REG3);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_HW_SW_PROG, SW, val);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_DONE_INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG3, val);
+
+        val = DVC_REGR(c, CTRL_REG1);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG1, INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG1, val);
+    }
+
+    // Rebuild the I2C_CNFG value from zero.  Without this reset, the DVC
+    // branch above would leave stale CTRL_REG1 contents in 'val' that get
+    // OR'ed into I2C_CNFG below whenever EnableNewMaster is false
+    // (AP15RmI2cSend performs the same reset before composing I2C_CNFG).
+    val = 0;
+
+    // There will always be only 2 transactions in normal mode
+    pBuffer1 = pBuffer;
+    pBuffer2 = (NvU8 *)(( NvU32)pBuffer + Transactions[0].NumBytes);
+
+    if (c->EnableNewMaster)
+    {
+        // Enable new master if it is available
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NEW_MASTER_FSM, ENABLE);
+    }
+
+    if (Transactions[0].Flags & NVRM_I2C_WRITE)
+    {
+        // Configure CMD1 as a write transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, DISABLE);
+
+        // Prepare the data to be written
+        NvOsMemcpy((NvU8* )&data, (void *)pBuffer1, Transactions[0].NumBytes);
+        // Write the data to the controller data registers
+        I2C_REGW(c, I2C_CMD_DATA1, data);
+        // configure slave1 device address
+        I2C_REGW(c, I2C_CMD_ADDR0, Transactions[0].Address);
+    }
+    else
+    {
+        // Configure CMD1 as a read transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, ENABLE);
+        // configure slave1 device address (LSB set selects read)
+        I2C_REGW(c, I2C_CMD_ADDR0, (Transactions[0].Address | 1));
+    }
+
+    if (Transactions[1].Flags & NVRM_I2C_WRITE)
+    {
+        // Configure CMD2 as a write transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD2, DISABLE);
+
+        // Prepare the data to be written
+        NvOsMemcpy((NvU8* )&data, (void *)pBuffer2, Transactions[1].NumBytes);
+
+        // Write the data to the controller data registers
+        I2C_REGW(c, I2C_CMD_DATA2, data);
+
+        /* write the slave 2 address */
+        I2C_REGW(c, I2C_CMD_ADDR1, Transactions[1].Address);
+    }
+    else
+    {
+        // Configure CMD2 as a read transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD2, ENABLE);
+        /* write the slave 2 address (LSB set selects read) */
+        I2C_REGW(c, I2C_CMD_ADDR1, (Transactions[1].Address | 1));
+    }
+
+    /* 7 bit address */
+    if (Transactions[0].Is10BitAddress == NV_FALSE)
+    {
+        // Configure the slave address type as 7bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD,
+                SEVEN_BIT_DEVICE_ADDRESS);
+    }
+    /* 10 bit address */
+    else
+    {
+        // Configure the slave address type as 10bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD,
+                TEN_BIT_DEVICE_ADDRESS);
+    }
+
+    if (Transactions[0].Flags & NVRM_I2C_NOACK)
+    {
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NOACK, ENABLE);
+    }
+
+    // Initialize the I2C param structure; Timeout stands until the
+    // ISR/poll loop records the actual outcome.
+    c->TransactionType = I2C_REPEAT_START_TRANSACTION;
+    c->I2cTransferStatus = NvError_Timeout;
+
+    // Configure the number of bytes to read/write
+    val |= NV_DRF_NUM(I2C, I2C_CNFG, LENGTH,
+            Transactions[0].NumBytes - 1);
+    // Configure the slave 2 as present
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SLV2, ENABLE);
+
+    I2C_REGW(c, I2C_CNFG, val);
+
+    // Start the transaction
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SEND, GO);
+    I2C_REGW(c, I2C_CNFG, val);
+
+    if (!c->I2cSyncSemaphore)
+    {
+        // No ISR installed: poll for completion
+        NvRmPrivI2cOalPoll(c);
+    } else
+    {
+        // Wait for the ISR to signal completion, up to the timeout
+        NvOsSemaphoreWaitTimeout(c->I2cSyncSemaphore, c->timeout);
+    }
+    if (c->I2cTransferStatus != NvError_Timeout)
+    {
+        if (c->I2cTransferStatus == NvSuccess)
+        {
+            if (!(Transactions[0].Flags & NVRM_I2C_WRITE))
+            {
+                // read the data for the first transaction
+                data = I2C_REGR(c, I2C_CMD_DATA1);
+
+                NvOsMemcpy(pBuffer1, (NvU8* )&data, Transactions[0].NumBytes);
+            }
+
+            if (!(Transactions[1].Flags & NVRM_I2C_WRITE))
+            {
+                // read the data for the second transaction
+                data = I2C_REGR(c, I2C_CMD_DATA2);
+
+                NvOsMemcpy(pBuffer2, (NvU8* )&data, Transactions[1].NumBytes);
+            }
+        }
+    }
+    else
+    {
+        // In case of timeout, reset the I2C controller
+        NvRmModuleReset(c->hRmDevice, NVRM_MODULE_ID(c->ModuleId, c->Instance));
+    }
+    return c->I2cTransferStatus;
+}
+
+/**
+ * Writes up to I2C_PACKET_SIZE bytes from pBuffer to the slave addressed by
+ * pTransaction.  On success, *pBytesTransferred (if non-NULL) receives the
+ * byte count; on timeout the controller is reset.
+ */
+static NvError
+AP15RmI2cSend(
+    NvRmI2cController *c,
+    NvU8* pBuffer,
+    const NvRmI2cTransactionInfo *pTransaction,
+    NvU32* pBytesTransferred)
+{
+    NvU32 val = 0;
+    // Zero-initialize: when ByteCount <= 4 the memcpy below fills only
+    // fifo[0], yet fifo[1] is unconditionally written to I2C_CMD_DATA2 --
+    // without this init, uninitialized stack data would be sent to the
+    // controller register.
+    NvU32 fifo[2] = {0, 0};
+    NvU32 ByteCount;
+
+    NV_ASSERT(pBuffer);
+    NV_ASSERT(pTransaction->NumBytes > 0);
+
+    // If requested i2c is dvc i2c, then disable dvc hardware from using the dvc i2c bus.
+    if (c->ModuleId == NvRmModuleID_Dvc)
+    {
+        val = DVC_REGR(c, CTRL_REG3);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_HW_SW_PROG, SW, val);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_DONE_INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG3, val);
+
+        val = DVC_REGR(c, CTRL_REG1);
+        val = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG1, INTR_EN, ENABLE, val);
+        DVC_REGW(c, CTRL_REG1, val);
+    }
+
+    // Start composing I2C_CNFG from scratch (the DVC path reused 'val').
+    val = 0;
+
+    if (c->EnableNewMaster)
+    {
+        // Enable new master if it is available
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NEW_MASTER_FSM, ENABLE);
+    }
+
+    // Configure the slave address
+    if (c->Is10BitAddress == NV_FALSE)
+    {
+        /* 7 bit address */
+        /* write the slave address */
+        I2C_REGW(c, I2C_CMD_ADDR0, pTransaction->Address);
+        // Configure for a write transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, DISABLE);
+        // Configure the slave address type as 7bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD, SEVEN_BIT_DEVICE_ADDRESS);
+    }
+    else
+    {
+        /* 10 bit address */
+
+        /* write the slave address */
+        I2C_REGW(c, I2C_CMD_ADDR0, pTransaction->Address);
+        // Configure for a write transaction
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, CMD1, DISABLE);
+        // Configure the slave address type as 10bit address
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, A_MOD,
+                TEN_BIT_DEVICE_ADDRESS);
+    }
+
+    if (c->NoACK)
+    {
+        val |= NV_DRF_DEF(I2C, I2C_CNFG, NOACK, ENABLE);
+    }
+
+    // Calculate the number of bytes that can be written
+    ByteCount = (pTransaction->NumBytes > I2C_PACKET_SIZE) ?
+        I2C_PACKET_SIZE : pTransaction->NumBytes;
+
+    NvOsMemcpy((NvU8 *)fifo,(void *)pBuffer, ByteCount);
+
+    // Initialize the I2C param structure; Timeout stands until the
+    // ISR/poll loop records the actual outcome.
+    c->TransactionType = I2C_WRITE;
+    c->I2cTransferStatus = NvError_Timeout;
+
+    // Write the data to the controller data registers
+    I2C_REGW(c, I2C_CMD_DATA1, fifo[0]);
+    I2C_REGW(c, I2C_CMD_DATA2, fifo[1]);
+
+    // Configure the number of bytes to be written
+    val |= NV_DRF_NUM(I2C, I2C_CNFG, LENGTH,
+            ByteCount - 1);
+    // disable repeated start
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SLV2, DISABLE);
+    I2C_REGW(c, I2C_CNFG, val);
+
+    // Start the transaction
+    val |= NV_DRF_DEF(I2C, I2C_CNFG, SEND, GO);
+    I2C_REGW(c, I2C_CNFG, val);
+
+    if (!c->I2cSyncSemaphore)
+    {
+        // Wait for the transaction to be completed till there is timeout/max retries
+        NvRmPrivI2cOalPoll(c);
+    } else
+    {
+        // Wait for the transaction to be completed till there is timeout
+        NvOsSemaphoreWaitTimeout(c->I2cSyncSemaphore, c->timeout);
+    }
+
+    if (c->I2cTransferStatus == NvSuccess
+            && pBytesTransferred != NULL)
+    {
+        *pBytesTransferred = ByteCount;
+    }
+    if (c->I2cTransferStatus == NvError_Timeout)
+    {
+        // In case of timeout, reset the I2C controller
+        NvRmModuleReset(c->hRmDevice, NVRM_MODULE_ID(c->ModuleId, c->Instance));
+    }
+    return c->I2cTransferStatus;
+}
+
+
+// Binds the AP15 I2C controller function table and sets up the interrupt
+// path.  Returns NvSuccess even when semaphore creation or interrupt
+// registration fails: the driver then falls back to polled transfers
+// (I2cSyncSemaphore == 0 selects NvRmPrivI2cOalPoll in the transfer paths).
+NvError AP15RmI2cOpen(NvRmI2cController *c)
+{
+    NvError status = NvSuccess;
+
+    NV_ASSERT(c!= NULL);
+
+    /* Populate the structures */
+    c->receive = AP15RmI2cReceive;
+    c->send = AP15RmI2cSend;
+    c->repeatStart = AP15RmI2cRepeatStartTransaction;
+    c->close = AP15RmI2cClose;
+    c->GetGpioPins = AP15RmI2cGetGpioPins;
+
+    // DVC has a different register layout; all register accesses are made
+    // relative to this offset so the same code serves both controllers.
+    c->I2cRegisterOffset = I2C_I2C_CNFG_0;
+    if (c->ModuleId == NvRmModuleID_Dvc)
+    {
+        c->I2cRegisterOffset = DVC_I2C_CNFG_0;
+    }
+
+    // Create the sync semaphore
+    status = NvOsSemaphoreCreate( &c->I2cSyncSemaphore, 0);
+
+    if (status == NvSuccess)
+    {
+        NvU32 IrqList;
+        NvOsInterruptHandler IntHandlers;
+
+        /* Install interrupt handler */
+        if (c->ModuleId == NvRmModuleID_Dvc)
+        {
+            IntHandlers = DvcIsr;
+        } else
+        {
+            IntHandlers = I2cIsr;
+        }
+        IrqList = NvRmGetIrqForLogicalInterrupt(
+            c->hRmDevice, NVRM_MODULE_ID(c->ModuleId, c->Instance), 0);
+
+        status = NvRmInterruptRegister(c->hRmDevice, 1, &IrqList, &IntHandlers,
+            c, &c->I2CInterruptHandle, NV_TRUE);
+        if (status != NvSuccess)
+        {
+            /* Fall back to Polling mode, but assert in debug build */
+            NV_ASSERT(!"I2C module interrupt register failed!");
+            NvOsSemaphoreDestroy(c->I2cSyncSemaphore);
+            c->I2cSyncSemaphore = 0;
+        }
+    }
+
+    // NOTE(review): NvSuccess is returned unconditionally so that interrupt
+    // setup failure degrades to polling rather than failing the open.
+    return NvSuccess;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
new file mode 100644
index 000000000000..39debb0da861
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: PWM API</b>
+ *
+ * @b Description: Contains the NvRM PWM implementation.
+ */
+
+#include "ap15rm_pwm_private.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "nvodm_query_pinmux.h"
+#include "nvodm_modules.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_hwintf.h"
+#include "ap15/arpwfm.h"
+#include "ap15/arapbpm.h"
+
+#define PWM_REGR( VirtualAddress, offset ) \
+ NV_READ32(VirtualAddress + offset)
+
+#define PWM_REGW( VirtualAddress, offset, value ) \
+ NV_WRITE32(VirtualAddress + offset, value)
+
+#define PMC_REGR( VirtualAddress, offset ) \
+ NV_READ32(VirtualAddress + offset)
+
+#define PMC_REGW( VirtualAddress, offset, value ) \
+ NV_WRITE32(VirtualAddress + offset, value)
+
+static NvU32 s_PwmPowerID = 0;
+static NvOsMutexHandle s_hPwmMutex = NULL;
+static NvRmPwmHandle s_hPwm = NULL;
+static NvBool s_IsPwmFirstConfig = NV_FALSE;
+static NvBool s_IsFreqDividerSupported = NV_FALSE;
+
+// Powers the PWM module up (voltage first, then clock) or down (clock
+// first, then voltage).  The second step runs only if the first succeeds;
+// the first failing status is returned.
+static NvError PwmPowerConfigure(NvRmPwmHandle hPwm, NvBool IsEnablePower)
+{
+    NvError err;
+
+    if (IsEnablePower == NV_TRUE)
+    {
+        // Enable power, then the module clock.
+        err = NvRmPowerVoltageControl(hPwm->RmDeviceHandle,
+                  NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+                  s_PwmPowerID,
+                  NvRmVoltsUnspecified,
+                  NvRmVoltsUnspecified,
+                  NULL, 0, NULL);
+        if (err != NvSuccess)
+            return err;
+
+        return NvRmPowerModuleClockControl(hPwm->RmDeviceHandle,
+                   NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+                   s_PwmPowerID,
+                   NV_TRUE);
+    }
+
+    // Disable the module clock, then remove power.
+    err = NvRmPowerModuleClockControl(hPwm->RmDeviceHandle,
+              NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+              s_PwmPowerID,
+              NV_FALSE);
+    if (err != NvSuccess)
+        return err;
+
+    return NvRmPowerVoltageControl(hPwm->RmDeviceHandle,
+               NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+               s_PwmPowerID,
+               NvRmVoltsOff,
+               NvRmVoltsOff,
+               NULL, 0, NULL);
+}
+
+
+// Validates that Mode is a plain enable/disable request and that the PWM
+// hardware reports support for the requested output.  Blink modes never
+// reach this check (they take the PMC path in NvRmPwmConfig).
+static NvError PwmCheckValidConfig(NvRmPwmHandle hPwm,
+                                   NvRmPwmOutputId OutputId,
+                                   NvRmPwmMode Mode)
+{
+    NvRmModulePwmInterfaceCaps Caps;
+    NvError err;
+
+    if ((Mode != NvRmPwmMode_Disable) &&
+        (Mode != NvRmPwmMode_Enable))
+        return NvError_NotSupported;
+
+    err = NvRmGetModuleInterfaceCapabilities(hPwm->RmDeviceHandle,
+              NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+              sizeof(NvRmModulePwmInterfaceCaps),
+              &Caps);
+    if (err != NvSuccess)
+        return err;
+
+    // Each output has a corresponding bit in the capability mask.
+    if (!(Caps.PwmOutputIdSupported & (1 << (OutputId-1))))
+        return NvError_NotSupported;
+
+    return NvSuccess;
+}
+
+NvError NvRmPrivPwmInit(NvRmDeviceHandle hRm);
+// One-time module init: create the mutex that guards all shared PWM state
+// (s_hPwm, s_PwmPowerID, the first-config flag).
+NvError NvRmPrivPwmInit(NvRmDeviceHandle hRm)
+{
+    return NvOsMutexCreate(&s_hPwmMutex);
+}
+
+void NvRmPrivPwmDeInit(NvRmDeviceHandle hRm);
+// Module teardown counterpart of NvRmPrivPwmInit: destroys the global PWM
+// mutex.  The hRm argument is unused here.
+void NvRmPrivPwmDeInit(NvRmDeviceHandle hRm)
+{
+    NvOsMutexDestroy(s_hPwmMutex);
+}
+
+/**
+ * Opens (or re-references) the singleton PWM handle: maps the PWM register
+ * banks and the PMC aperture, then probes the controller version to decide
+ * whether the frequency divider is available.  On any failure the partially
+ * built handle is fully unwound -- including resetting s_hPwm to NULL so a
+ * later open cannot touch freed memory.
+ */
+NvError
+NvRmPwmOpen(
+    NvRmDeviceHandle hDevice,
+    NvRmPwmHandle *phPwm)
+{
+    NvError status = NvSuccess;
+    NvU32 PwmPhysAddr = 0, i = 0, j = 0, PmcPhysAddr = 0;
+    NvRmModuleCapability caps[4];
+    NvRmModuleCapability *pCap = NULL;
+
+    NV_ASSERT(hDevice);
+    NV_ASSERT(phPwm);
+
+    NvOsMutexLock(s_hPwmMutex);
+
+    // Singleton: a second open just bumps the reference count.
+    if (s_hPwm)
+    {
+        s_hPwm->RefCount++;
+        goto exit;
+    }
+
+    // Allocate the memory for the pwm handle
+    s_hPwm = NvOsAlloc(sizeof(NvRmPwm));
+    if (!s_hPwm)
+    {
+        status = NvError_InsufficientMemory;
+        goto fail;
+    }
+    NvOsMemset(s_hPwm, 0, sizeof(NvRmPwm));
+
+    // Set the pwm handle parameters
+    s_hPwm->RmDeviceHandle = hDevice;
+
+    // Get the pwm physical and virtual base address
+    NvRmModuleGetBaseAddress(hDevice,
+        NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+        &PwmPhysAddr, &(s_hPwm->PwmBankSize));
+    s_hPwm->PwmBankSize = PWM_BANK_SIZE;
+    for (i = 0; i < NvRmPwmOutputId_Num-2; i++)
+    {
+        status = NvRmPhysicalMemMap(
+            PwmPhysAddr + i*s_hPwm->PwmBankSize,
+            s_hPwm->PwmBankSize,
+            NVOS_MEM_READ_WRITE,
+            NvOsMemAttribute_Uncached,
+            (void**)&s_hPwm->VirtualAddress[i]);
+        if (status != NvSuccess)
+        {
+            // Unwind the banks already mapped, then drop the handle and
+            // clear the global so a later open does not see a dangling
+            // pointer.
+            for (j = 0; j < i; j++)
+                NvRmPhysicalMemUnmap((void*)s_hPwm->VirtualAddress[j],
+                    s_hPwm->PwmBankSize);
+            NvOsFree(s_hPwm);
+            s_hPwm = NULL;
+            goto fail;
+        }
+    }
+
+    // Get the pmc physical and virtual base address
+    NvRmModuleGetBaseAddress(hDevice,
+        NVRM_MODULE_ID(NvRmModuleID_Pmif, 0),
+        &PmcPhysAddr, &(s_hPwm->PmcBankSize));
+    s_hPwm->PmcBankSize = PMC_BANK_SIZE;
+
+    status = NvRmPhysicalMemMap(
+        PmcPhysAddr,
+        s_hPwm->PmcBankSize,
+        NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached,
+        (void**)&s_hPwm->VirtualAddress[NvRmPwmOutputId_Num-2]);
+    if (status != NvSuccess)
+    {
+        // Unwind all pwm bank mappings before releasing the handle.
+        for (j = 0; j < NvRmPwmOutputId_Num-2; j++)
+            NvRmPhysicalMemUnmap((void*)s_hPwm->VirtualAddress[j],
+                s_hPwm->PwmBankSize);
+        NvOsFree(s_hPwm);
+        s_hPwm = NULL;
+        goto fail;
+    }
+
+    // Capability table: versions 1.1 and later support the frequency
+    // divider field (see s_IsFreqDividerSupported below).
+    caps[0].MajorVersion = 1;
+    caps[0].MinorVersion = 0;
+    caps[0].EcoLevel = 0;
+    caps[0].Capability = &caps[0];
+
+    caps[1].MajorVersion = 1;
+    caps[1].MinorVersion = 1;
+    caps[1].EcoLevel = 0;
+    caps[1].Capability = &caps[1];
+
+    caps[2].MajorVersion = 1;
+    caps[2].MinorVersion = 2;
+    caps[2].EcoLevel = 0;
+    caps[2].Capability = &caps[2];
+
+    caps[3].MajorVersion = 2;
+    caps[3].MinorVersion = 0;
+    caps[3].EcoLevel = 0;
+    caps[3].Capability = &caps[3];
+
+    NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(
+        hDevice,
+        NvRmModuleID_Pwm,
+        caps,
+        sizeof(caps)/sizeof(caps[0]),
+        (void**)&pCap));
+
+    if ((pCap->MajorVersion > 1) ||
+        ((pCap->MajorVersion == 1) && (pCap->MinorVersion > 0)))
+        s_IsFreqDividerSupported = NV_TRUE;
+
+    s_hPwm->RefCount++;
+exit:
+    *phPwm = s_hPwm;
+    NvOsMutexUnlock(s_hPwmMutex);
+    return NvSuccess;
+
+fail:
+    NvOsMutexUnlock(s_hPwmMutex);
+    return status;
+}
+
+// Drops one reference on the PWM handle; on the last close, unmaps all
+// register apertures, powers the module down if it was ever configured,
+// and frees the singleton.
+// NOTE(review): hPwm and s_hPwm are used interchangeably below -- this
+// assumes hPwm is always the singleton returned by NvRmPwmOpen; confirm no
+// caller passes anything else.
+void NvRmPwmClose(NvRmPwmHandle hPwm)
+{
+    NvU32 i;
+    if (!hPwm)
+        return;
+
+    NV_ASSERT(hPwm->RefCount);
+
+    NvOsMutexLock(s_hPwmMutex);
+    hPwm->RefCount--;
+    if (hPwm->RefCount == 0)
+    {
+        // Unmap the pwm register virtual address space
+        for (i = 0; i < NvRmPwmOutputId_Num-2; i++)
+        {
+            NvRmPhysicalMemUnmap((void*)s_hPwm->VirtualAddress[i],
+                s_hPwm->PwmBankSize);
+        }
+
+        // Unmap the pmc register virtual address space
+        NvRmPhysicalMemUnmap(
+            (void*)s_hPwm->VirtualAddress[NvRmPwmOutputId_Num-2],
+            s_hPwm->PmcBankSize);
+
+        // Power/pinmux teardown is only needed if NvRmPwmConfig ever ran.
+        if (s_IsPwmFirstConfig)
+        {
+            // Disable power
+            PwmPowerConfigure(hPwm, NV_FALSE);
+
+            // Unregister with RM power
+            NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);
+
+            // Tri-state the pin-mux pins
+            NV_ASSERT_SUCCESS(NvRmSetModuleTristate(hPwm->RmDeviceHandle,
+                NVRM_MODULE_ID(NvRmModuleID_Pwm, 0), NV_TRUE));
+            s_IsPwmFirstConfig = NV_FALSE;
+        }
+        NvOsFree(s_hPwm);
+        s_hPwm = NULL;
+    }
+    NvOsMutexUnlock(s_hPwmMutex);
+}
+
+#define MAX_DUTY_CYCLE 256
+
+// Configures one PWM output.  Regular outputs go through the PWM controller
+// registers (lazy power/pinmux bring-up on first use); the Blink output is
+// driven by the PMC blink timer instead.  DutyCycle is unsigned 15.16
+// fixed-point percent.  RequestedFreqHzOrPeriod is a frequency (Hz) for
+// regular outputs and a period for the blink output; the achieved value is
+// written back through pCurrentFreqHzOrPeriod.
+NvError NvRmPwmConfig(
+    NvRmPwmHandle hPwm,
+    NvRmPwmOutputId OutputId,
+    NvRmPwmMode Mode,
+    NvU32 DutyCycle,
+    NvU32 RequestedFreqHzOrPeriod,
+    NvU32 *pCurrentFreqHzOrPeriod)
+{
+    NvError status = NvSuccess;
+    NvU32 RegValue = 0, ResultFreqKHz = 0;
+    NvU8 PwmMode = 0;
+    NvU32 ClockFreqKHz = 0, DCycle = 0, DataOn = 0, DataOff = 0;
+    NvU32 PmcCtrlReg = 0, PmcDpdPadsReg = 0, PmcBlinkTimerReg = 0;
+    NvU32 RequestPeriod = 0, ResultPeriod = 0;
+    NvU32 DataOnRegVal = 0, DataOffRegVal = 0;
+    NvU32 *pPinMuxConfigTable = NULL;
+    NvU32 Count = 0, divider = 1;
+
+    NvOsMutexLock(s_hPwmMutex);
+
+    if (OutputId != NvRmPwmOutputId_Blink)
+    {
+        // First configuration: bring up power, reset the module, and route
+        // the pinmux.  Undone in NvRmPwmClose on last release.
+        if (!s_IsPwmFirstConfig)
+        {
+            // Register with RM power
+            status = NvRmPowerRegister(hPwm->RmDeviceHandle, NULL, &s_PwmPowerID);
+            if (status != NvSuccess)
+                goto fail;
+
+            // Enable power
+            status = PwmPowerConfigure(hPwm, NV_TRUE);
+            if (status != NvSuccess)
+            {
+                NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);
+                goto fail;
+            }
+
+            // Reset pwm module
+            NvRmModuleReset(hPwm->RmDeviceHandle, NVRM_MODULE_ID(NvRmModuleID_Pwm, 0));
+
+            // Config pwm pinmux; exactly one pinmux table entry is expected.
+            NvOdmQueryPinMux(NvOdmIoModule_Pwm, (const NvU32 **)&pPinMuxConfigTable,
+                &Count);
+            if (Count != 1)
+            {
+                status = NvError_NotSupported;
+                PwmPowerConfigure(hPwm, NV_FALSE);
+                NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);
+                goto fail;
+            }
+            hPwm->PinMap = pPinMuxConfigTable[0];
+            status = NvRmSetModuleTristate(hPwm->RmDeviceHandle,
+                NVRM_MODULE_ID(NvRmModuleID_Pwm, 0), NV_FALSE);
+
+            if (status != NvSuccess)
+            {
+                PwmPowerConfigure(hPwm, NV_FALSE);
+                NvRmPowerUnRegister(hPwm->RmDeviceHandle, s_PwmPowerID);
+                goto fail;
+            }
+            s_IsPwmFirstConfig = NV_TRUE;
+        }
+
+        // Validate PWM output and pin map config
+        status = PwmCheckValidConfig(hPwm, OutputId, Mode);
+        if (status != NvSuccess)
+            goto fail;
+
+        // The controller divides its clock by MAX_DUTY_CYCLE to produce the
+        // output frequency, so request the clock accordingly (in kHz).
+        ClockFreqKHz = (RequestedFreqHzOrPeriod * MAX_DUTY_CYCLE) / 1000;
+        if (ClockFreqKHz == 0)
+            ClockFreqKHz = 1;
+
+        if (RequestedFreqHzOrPeriod == NvRmFreqMaximum)
+            ClockFreqKHz = NvRmFreqMaximum;
+
+        status = NvRmPowerModuleClockConfig(hPwm->RmDeviceHandle,
+                        NVRM_MODULE_ID(NvRmModuleID_Pwm, 0),
+                        s_PwmPowerID,
+                        NvRmFreqUnspecified,
+                        NvRmFreqUnspecified,
+                        &ClockFreqKHz,
+                        1,
+                        &ResultFreqKHz,
+                        0);
+        if (status != NvSuccess)
+            goto fail;
+
+        *pCurrentFreqHzOrPeriod = (ResultFreqKHz * 1000) / MAX_DUTY_CYCLE;
+
+        if (Mode == NvRmPwmMode_Disable)
+            PwmMode = 0;
+        else
+            PwmMode = 1;
+
+        /*
+         * Convert from percentage unsigned 15.16 fixed point
+         * format to actual register value
+         */
+        DCycle = (NvU8)((DutyCycle * MAX_DUTY_CYCLE/100)>>16);
+
+        RegValue = PWM_SETNUM(CSR_0, ENB, PwmMode) |
+                   PWM_SETNUM(CSR_0, PWM_0, DCycle);
+
+        // On >= v1.1 controllers, use the divider field to get closer to
+        // the requested frequency (round to nearest).
+        if (s_IsFreqDividerSupported)
+        {
+            if ((*pCurrentFreqHzOrPeriod > RequestedFreqHzOrPeriod) &&
+                (RequestedFreqHzOrPeriod != 0))
+            {
+                divider = *pCurrentFreqHzOrPeriod/RequestedFreqHzOrPeriod;
+                if ((*pCurrentFreqHzOrPeriod%RequestedFreqHzOrPeriod)*2>RequestedFreqHzOrPeriod)
+                    divider +=1;
+                *pCurrentFreqHzOrPeriod = *pCurrentFreqHzOrPeriod / divider;
+                RegValue |= PWM_SETNUM(CSR_0, N_A_2, divider);
+            }
+        }
+
+        PWM_REGW(hPwm->VirtualAddress[OutputId-1], 0, RegValue);
+    }
+    else
+    {
+        // Blink output: program the PMC blink timer.  For the blink id,
+        // VirtualAddress[OutputId-1] is the PMC aperture mapped in
+        // NvRmPwmOpen.  Cap the on-time at what the timer can express.
+        RequestPeriod = RequestedFreqHzOrPeriod;
+        DCycle = DutyCycle>>16;
+        DataOn = (RequestPeriod * DCycle)/100;
+        if (DataOn > MAX_SUPPORTED_PERIOD)
+        {
+            ResultPeriod = (MAX_SUPPORTED_PERIOD * 100)/DCycle;
+            DataOn = MAX_SUPPORTED_PERIOD;
+        }
+        else
+        {
+            ResultPeriod = RequestPeriod;
+        }
+        DataOff = ResultPeriod - DataOn;
+        DataOnRegVal = DataOn * DATA_ON_FACTOR;
+        if (DataOnRegVal >= MAX_DATA_ON)
+            DataOnRegVal = MAX_DATA_ON;
+        DataOffRegVal = DataOff * DATA_ON_FACTOR;
+        if (DataOffRegVal >= MAX_DATA_ON)
+            DataOffRegVal = MAX_DATA_ON;
+
+        PmcCtrlReg = PMC_REGR(hPwm->VirtualAddress[OutputId-1],
+                         APBDEV_PMC_CNTRL_0);
+        PmcDpdPadsReg = PMC_REGR(hPwm->VirtualAddress[OutputId-1],
+                            APBDEV_PMC_DPD_PADS_ORIDE_0);
+        PmcBlinkTimerReg = PMC_REGR(hPwm->VirtualAddress[OutputId-1],
+                               APBDEV_PMC_BLINK_TIMER_0);
+        PmcBlinkTimerReg &=~PMC_SETNUM(BLINK_TIMER, DATA_OFF, 0xFFFF);
+        PmcBlinkTimerReg &=~PMC_SETNUM(BLINK_TIMER, DATA_ON, 0xFFFF);
+        PmcBlinkTimerReg |=PMC_SETNUM(BLINK_TIMER, DATA_OFF, DataOffRegVal);
+        PmcBlinkTimerReg |=PMC_SETNUM(BLINK_TIMER, DATA_ON, DataOnRegVal);
+        PmcCtrlReg |= PMC_SETDEF(CNTRL, BLINK_EN, ENABLE);
+        PmcDpdPadsReg |= PMC_SETDEF(DPD_PADS_ORIDE, BLINK, ENABLE);
+        if (Mode == NvRmPwmMode_Blink_LED)
+        {
+            PmcBlinkTimerReg |= (1 << 15);
+        }
+
+        if (Mode == NvRmPwmMode_Blink_32KHzClockOutput)
+        {
+            PmcBlinkTimerReg &= ~(1 << 15);
+        }
+
+        if (Mode == NvRmPwmMode_Blink_Disable)
+        {
+            PmcCtrlReg &= ~PMC_SETDEF(CNTRL, BLINK_EN, ENABLE);
+            PmcDpdPadsReg &= ~PMC_SETDEF(DPD_PADS_ORIDE, BLINK, ENABLE);
+        }
+        // NOTE(review): DATA_OFF/DATA_ON were already OR'ed in above; this
+        // repeat is a no-op unless the DATA_ON field overlaps bit 15, in
+        // which case it can undo the 32KHz-clock clear -- confirm the
+        // BLINK_TIMER field layout before removing or keeping this.
+        PmcBlinkTimerReg |=PMC_SETNUM(BLINK_TIMER, DATA_OFF, DataOffRegVal)
+                           | PMC_SETNUM(BLINK_TIMER, DATA_ON, DataOnRegVal);
+        PMC_REGW(hPwm->VirtualAddress[OutputId-1],
+            APBDEV_PMC_CNTRL_0, PmcCtrlReg);
+        PMC_REGW(hPwm->VirtualAddress[OutputId-1],
+            APBDEV_PMC_DPD_PADS_ORIDE_0, PmcDpdPadsReg);
+        PMC_REGW(hPwm->VirtualAddress[OutputId-1],
+            APBDEV_PMC_BLINK_TIMER_0, PmcBlinkTimerReg);
+        *pCurrentFreqHzOrPeriod = ResultPeriod;
+    }
+fail:
+    NvOsMutexUnlock(s_hPwmMutex);
+    return status;
+}
+
+
+
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm_private.h b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm_private.h
new file mode 100644
index 000000000000..dadffcb14efe
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm_private.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: PWM API</b>
+ *
+ * @b Description: Contains the PWM declarations.
+ */
+
+#ifndef INCLUDED_PWM_PRIVATE_H
+#define INCLUDED_PWM_PRIVATE_H
+
+#include "nvrm_module.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pwm.h"
+
+// Register bank sizes (bytes) used when mapping the controller apertures.
+#define PWM_BANK_SIZE 16
+#define PMC_BANK_SIZE 192
+// Largest blink on-time the PMC blink timer can express, in the period
+// units passed to NvRmPwmConfig.
+#define MAX_SUPPORTED_PERIOD 16
+#define MAX_DATA_ON 0xFFFF
+#define DATA_ON_FACTOR 8194 // 8194 = 1/(4 * 30.51us)
+
+// Singleton PWM state; all access is serialized by s_hPwmMutex in
+// ap15rm_pwm.c.
+typedef struct NvRmPwmRec
+{
+    // RM device handle
+    NvRmDeviceHandle RmDeviceHandle;
+
+    // Pwm configuration pin-map.
+    NvOdmPwmPinMap PinMap;
+
+    // Pwm open reference count
+    NvU32 RefCount;
+
+    // Pwm virtual base address; the last slot holds the PMC aperture,
+    // the earlier slots the per-output PWM banks.
+    NvU32 VirtualAddress[NvRmPwmOutputId_Num-1];
+
+    // Pwm bank size
+    NvU32 PwmBankSize;
+
+    // Pmc bank size
+    NvU32 PmcBankSize;
+
+} NvRmPwm;
+
+// DRF convenience wrappers over the PWM controller register definitions.
+#define PWM_RESET(r) NV_RESETVAL(PWM_CONTROLLER_PWM,r)
+#define PWM_SETDEF(r,f,c) NV_DRF_DEF(PWM_CONTROLLER_PWM,r,f,c)
+#define PWM_SETNUM(r,f,n) NV_DRF_NUM(PWM_CONTROLLER_PWM,r,f,n)
+#define PWM_GET(r,f,v) NV_DRF_VAL(PWM_CONTROLLER_PWM,r,f,v)
+#define PWM_CLRSETDEF(v,r,f,c) NV_FLD_SET_DRF_DEF(PWM_CONTROLLER,r,f,c,v)
+#define PWM_CLRSETNUM(v,r,f,n) NV_FLD_SET_DRF_NUM(PWM_CONTROLLER,r,f,n,v)
+// NOTE(review): the second operand spells "PWMCONTROLLER_" (no underscore)
+// while the first uses "PWM_CONTROLLER_"; confirm which prefix the generated
+// headers actually define before using this macro.
+#define PWM_MASK(x,y) (1 << (PWM_CONTROLLER_##x##_0 - PWMCONTROLLER_##y##_0))
+
+// DRF convenience wrappers over the APBDEV PMC register definitions.
+#define PMC_RESET(r) NV_RESETVAL(APBDEV_PMC,r)
+#define PMC_SETDEF(r,f,c) NV_DRF_DEF(APBDEV_PMC,r,f,c)
+#define PMC_SETNUM(r,f,n) NV_DRF_NUM(APBDEV_PMC,r,f,n)
+#define PMC_GET(r,f,v) NV_DRF_VAL(APBDEV_PMC,r,f,v)
+#define PMC_CLRSETDEF(v,r,f,c) NV_FLD_SET_DRF_DEF(APBDEV_PMC,r,f,c,v)
+#define PMC_CLRSETNUM(v,r,f,n) NV_FLD_SET_DRF_NUM(APBDEV_PMC,r,f,n,v)
+
+#endif // INCLUDED_PWM_PRIVATE_H
+
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_slink_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_slink_hw_private.c
new file mode 100644
index 000000000000..68f29876d85f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_slink_hw_private.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Private functions implementation for the slink Rm driver</b>
+ *
+ * @b Description: Implements the private functions for the slink hw interface.
+ *
+ */
+
+// hardware includes
+#include "ap15/arslink.h"
+#include "rm_spi_slink_hw_private.h"
+#include "nvrm_drf.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+#define SLINK_REG_READ32(pSlinkHwRegsVirtBaseAdd, reg) \
+ NV_READ32((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))
+#define SLINK_REG_WRITE32(pSlinkHwRegsVirtBaseAdd, reg, val) \
+ do { \
+ NV_WRITE32((((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))), (val)); \
+ } while(0)
+
+
+#define MAX_SLINK_FIFO_DEPTH 32
+
+#define ALL_SLINK_STATUS_CLEAR \
+ (NV_DRF_NUM(SLINK, STATUS, RDY, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_OVF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_OVF, 1))
+
+static void
+SlinkHwSetSignalMode(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvOdmQuerySpiSignalMode SignalMode);
+
+/**
+ * Initialize the slink register.
+ */
+// Fills in the SerialHwRegisters descriptor with reset-time defaults for
+// the given slink instance and composes the initial COMMAND register value
+// (software chip select, inactive-high CS, master mode, idle-data polarity).
+// Registers are not touched here; the values are written later by the
+// other Hw*Fxn entry points.
+static void
+SlinkHwRegisterInitialize(
+    NvU32 SlinkInstanceId,
+    SerialHwRegisters *pSlinkHwRegs)
+{
+    NvU32 CommandReg1;
+    pSlinkHwRegs->InstanceId = SlinkInstanceId;
+    pSlinkHwRegs->pRegsBaseAdd = NULL;
+    pSlinkHwRegs->RegBankSize = 0;
+    pSlinkHwRegs->HwTxFifoAdd = SLINK_TX_FIFO_0;
+    pSlinkHwRegs->HwRxFifoAdd = SLINK_RX_FIFO_0;
+    pSlinkHwRegs->IsPackedMode = NV_FALSE;
+    pSlinkHwRegs->PacketLength = 1;
+    pSlinkHwRegs->CurrSignalMode = NvOdmQuerySpiSignalMode_Invalid;
+    pSlinkHwRegs->MaxWordTransfer = MAX_SLINK_FIFO_DEPTH;
+    pSlinkHwRegs->IsLsbFirst = NV_FALSE;
+    pSlinkHwRegs->IsMasterMode = NV_TRUE;
+    pSlinkHwRegs->IsNonWordAlignedPackModeSupported = NV_FALSE;
+
+    CommandReg1 = NV_RESETVAL(SLINK, COMMAND);
+
+    // Initialize the chip select bits to select the s/w only
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_SW, SOFT, CommandReg1);
+
+    // Set chip select to normal high level. (inverted polarity).
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_VALUE, HIGH, CommandReg1);
+
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, M_S, MASTER, CommandReg1);
+
+    // NOTE(review): IsIdleDataOutHigh is read here but never set by this
+    // function -- presumably the caller initializes it before invoking this
+    // entry point; confirm against the common slink driver.
+    if (pSlinkHwRegs->IsIdleDataOutHigh)
+    {
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, ACTIVE_SDA, DRIVE_HIGH, CommandReg1);
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SDA, DRIVE_HIGH, CommandReg1);
+    }
+    else
+    {
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, ACTIVE_SDA, DRIVE_LOW, CommandReg1);
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SDA, DRIVE_LOW, CommandReg1);
+    }
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg1;
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = NV_RESETVAL(SLINK, COMMAND2);
+    pSlinkHwRegs->HwRegs.SlinkRegs.Status = NV_RESETVAL(SLINK, STATUS);
+    pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl = NV_RESETVAL(SLINK, DMA_CTL);
+}
+
+/**
+ * Program the SPI signal mode (CPOL/CPHA): modes 0 and 1 idle the clock
+ * low, modes 2 and 3 idle it high; modes 0 and 2 latch data on the first
+ * clock edge, modes 1 and 3 on the second.  The updated COMMAND value is
+ * cached and written to hardware, and CurrSignalMode is recorded.
+ */
+static void
+SlinkHwSetSignalMode(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvOdmQuerySpiSignalMode SignalMode)
+{
+    NvU32 Command = pSlinkHwRegs->HwRegs.SlinkRegs.Command1;
+
+    // Clock polarity: shared between mode pairs {0,1} and {2,3}.
+    switch (SignalMode)
+    {
+        case NvOdmQuerySpiSignalMode_0:
+        case NvOdmQuerySpiSignalMode_1:
+            Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, ACTIVE_SCLK,
+                            DRIVE_LOW, Command);
+            Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                            DRIVE_LOW, Command);
+            break;
+
+        case NvOdmQuerySpiSignalMode_2:
+        case NvOdmQuerySpiSignalMode_3:
+            Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, ACTIVE_SCLK,
+                            DRIVE_HIGH, Command);
+            Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                            DRIVE_HIGH, Command);
+            break;
+
+        default:
+            // Invalid mode: assert in debug builds; release builds fall
+            // through and rewrite the unmodified command value, matching
+            // the original behavior.
+            NV_ASSERT(!"Invalid SignalMode");
+            break;
+    }
+
+    // Clock phase: modes 1 and 3 sample on the second edge.
+    if ((SignalMode == NvOdmQuerySpiSignalMode_1) ||
+        (SignalMode == NvOdmQuerySpiSignalMode_3))
+        Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, SECOND_CLK_EDGE,
+                        Command);
+    else if ((SignalMode == NvOdmQuerySpiSignalMode_0) ||
+        (SignalMode == NvOdmQuerySpiSignalMode_2))
+        Command = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, FIRST_CLK_EDGE,
+                        Command);
+
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = Command;
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND, Command);
+    pSlinkHwRegs->CurrSignalMode = SignalMode;
+}
+
+/**
+ * Set the chip select signal level to be default based on device during the
+ * initialization.
+ */
+// Intentional no-op on this controller: the slink hardware exposes no
+// per-line default-level control, so the request is silently ignored.
+static void
+SlinkHwSetChipSelectDefaultLevelFxn(
+    SerialHwRegisters *pHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    // No control over the individual cs lines.
+}
+
+// Intentional no-op: holding CS active for a fixed word count is an AP20
+// capability; the AP15 table still provides the entry point so the common
+// driver can call it unconditionally.
+static void
+SlinkHwSetCSActiveForTotalWordsFxn(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 TotalWords)
+{
+    // This is AP20 specific feature, so do nothing
+}
+
+
+/**
+ * Set the chip select signal level.
+ */
+// Drives the requested chip-select line and routes CS to ChipSelectId.
+// NOTE(review): IsHigh maps to CS_VALUE LOW (and vice versa).  This matches
+// the inverted-polarity setup in SlinkHwRegisterInitialize, where CS_VALUE
+// HIGH is the inactive default, but confirm the intent against the common
+// slink driver's notion of "high".
+static void
+SlinkHwSetChipSelectLevel(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    NvU32 CommandReg1 = pSlinkHwRegs->HwRegs.SlinkRegs.Command1;
+    NvU32 CommandReg2 = pSlinkHwRegs->HwRegs.SlinkRegs.Command2;
+
+    // Set the chip select level.
+    if (IsHigh)
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_VALUE, LOW, CommandReg1);
+    else
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_VALUE, HIGH, CommandReg1);
+
+    // Route the software chip select to the requested line.
+    switch (ChipSelectId)
+    {
+        case 0:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS0, CommandReg2);
+            break;
+
+        case 1:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS1, CommandReg2);
+            break;
+
+        case 2:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS2, CommandReg2);
+            break;
+
+        case 3:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS3, CommandReg2);
+            break;
+
+        default:
+            NV_ASSERT(!"Invalid ChipSelectId");
+    }
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg1;
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = CommandReg2;
+
+    // COMMAND2 (routing) is written before COMMAND (level).
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND2,
+        pSlinkHwRegs->HwRegs.SlinkRegs.Command2);
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND,
+        pSlinkHwRegs->HwRegs.SlinkRegs.Command1);
+}
+
+/**
+ * Push words into the transmit fifo, clamped to the fifo depth.
+ * Returns the number of words actually written.
+ */
+static NvU32
+SlinkHwWriteInTransmitFifo(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 *pTxBuff,
+    NvU32 WordRequested)
+{
+    // The fifo can absorb at most MAX_SLINK_FIFO_DEPTH words per call.
+    NvU32 WordsToWrite = NV_MIN(WordRequested, MAX_SLINK_FIFO_DEPTH);
+    NvU32 Index;
+
+    for (Index = 0; Index < WordsToWrite; Index++)
+    {
+        SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, TX_FIFO, pTxBuff[Index]);
+    }
+    return WordsToWrite;
+}
+
+/**
+ * Pop WordRequested words from the receive fifo into pRxBuff.
+ * Unlike the transmit path, no depth clamp is applied; the caller is
+ * expected to request no more than the fifo holds.  Returns WordRequested.
+ */
+static NvU32
+SlinkHwReadFromReceiveFifo(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 *pRxBuff,
+    NvU32 WordRequested)
+{
+    NvU32 Index;
+
+    for (Index = 0; Index < WordRequested; Index++)
+    {
+        pRxBuff[Index] = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, RX_FIFO);
+    }
+    return WordRequested;
+}
+
+/**
+ * Initialize the slink interface for the hw access.
+ */
+// Populates the v1.0 slink function table consumed by the common SPI/slink
+// driver; the two stub entries (default CS level, CS-active-for-words) are
+// deliberate no-ops on this controller version.
+void NvRmPrivSpiSlinkInitSlinkInterface_v1_0(HwInterface *pSlinkInterface)
+{
+    pSlinkInterface->HwRegisterInitializeFxn = SlinkHwRegisterInitialize;
+    pSlinkInterface->HwSetSignalModeFxn = SlinkHwSetSignalMode;
+    pSlinkInterface->HwSetChipSelectDefaultLevelFxn = SlinkHwSetChipSelectDefaultLevelFxn;
+    pSlinkInterface->HwSetChipSelectLevelFxn = SlinkHwSetChipSelectLevel;
+    pSlinkInterface->HwWriteInTransmitFifoFxn = SlinkHwWriteInTransmitFifo;
+    pSlinkInterface->HwReadFromReceiveFifoFxn = SlinkHwReadFromReceiveFifo;
+    pSlinkInterface->HwSetCSActiveForTotalWordsFxn = SlinkHwSetCSActiveForTotalWordsFxn;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_dma.c b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_dma.c
new file mode 100644
index 000000000000..ee25aedbd1ee
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_dma.c
@@ -0,0 +1,1926 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * DMA Resource manager </b>
+ *
+ * @b Description: Implements the interface of the NvRM DMA. This files
+ * implements the API for the dma for the AP15 Dma controller.
+ *
+ * This file contains the common code for ap15 dma controller to manage
+ * the different operation of the dma.
+ */
+
+/**
+ * Dma Design details
+ * ------------------
+ * 1. There are two types of dma allocation, i.e. low priority and high
+ * priority dma. A low-priority allocation shares the same dma channel between
+ * different clients. A high-priority allocation does not share the dma channel;
+ * the channel is used by the requesting client only. Hence, a high-priority
+ * dma allocation may fail if there is no channel available, but a low-priority
+ * channel allocation will not fail as long as there is sufficient memory
+ * for the dma handle creation.
+ *
+ * 2. The dma allocation is done based on the requestor module Id. It only
+ * supports dma transfers from memory to an apb peripheral or vice versa.
+ *
+ * 3. The DmaTransfer transfers the data from source to dest and dest to source
+ * based on the direction passed. It may be possible to do the dma transfer
+ * from destination to source address by passing the dma direction as reverse.
+ *
+ * 4. The destination and source address may be any type like peripheral or
+ * memory or xmb memory. There is no restriction on passing the source/destn
+ * address by the client. The implementation will take care of proper
+ * configuration of the dma register address.
+ *
+ * 5. It may be possible to free the dma when transfer is going on.
+ * In this case, the dma will be free for the another allocation once the
+ * transfer completes. The dma handle will be destroyed immediately for the
+ * client.
+ *
+ * 6. It is possible to abort the dma transfer for both type of dma, high
+ * priority and low priority. In this case, the dma transfer will be immediatly
+ * stops if the transfer is going on for the requestor client and all dma
+ * request will be aborted.
+ *
+ * 7. The client can request for any ammount of the data transfer. If dma is not
+ * capable of transferring the data in one transaction, it will do the multiple
+ * transaction internally and will notify the client after last transaction.
+ *
+ *
+ * Implementation details
+ * ----------------------
+ * 1. The implementation should support any number of the apb dma
+ * channel on run time. There should not be any static allocation till it
+ * very necessarily. It does not support the ahb dma.
+ *
+ * 2. One dma channel is reserved for low-priority dma channel allocation,
+ * used to create the low-priority dma handles. These channels are shared
+ * between the low-priority requestor clients.
+ *
+ * 3. A client can only abort dma requests that it issued itself. It cannot
+ * cancel requests made by other clients.
+ *
+ * 4. Dma requests can be queued; there is no limit on the number of queued
+ * requests other than the memory available from the os.
+ *
+ * 5. It supports both synchronous and asynchronous operation.
+ *
+ * 6. For each dma channel, it allocates the memory for keeping the client
+ * request.
+ * if the number of request is more than the allocated number of list then it
+ * again reallocate the memory for the new request and free the already allocated
+ * list. The old request transferered to the new allocated list. the benifit
+ * of this type of method is that we need not to do the allocation to queue the
+ * request for each transfer request. In this way we can avoid the memory
+ * allocation and freeing of the memory for the each time.
+ * We start the allocation of memory from n and if the number of request is more
+ * than this (n) then reallocation is done for the (n +n) request and if it is
+ * full then again reallocation is done for the (2n + 2n). In this way the order
+ * of allocation is Log(n).
+ *
+ * 7. All apb dma channel interrupts are handled in a single isr.
+ * The detection of the interrupted dma channel is done by scanning all the dma
+ * channels one by one.
+ *
+ * 8. The apb dma hw control api is called through function pointers. So
+ * whenever there is a difference in the handling of a dma request for a dma
+ * channel, it goes through the dma hw interface.
+ *
+ * 9. I2s channel related requests will use continuous double buffering.
+ * Uart receive (from fifo to memory) will use continuous double buffering
+ * on the same buffer.
+ *
+ */
+
+#include "nvrm_dma.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_power.h"
+#include "nvrm_moduleids.h"
+#include "nvrm_hardware_access.h"
+#include "rm_dma_hw_private.h"
+#include "nvassert.h"
+#include "nvrm_priv_ap_general.h"
+
+/* FIXME move these to some header file */
+NvError NvRmPrivDmaInit(NvRmDeviceHandle hDevice);
+void NvRmPrivDmaDeInit(void);
+NvError NvRmPrivDmaSuspend(void);
+NvError NvRmPrivDmaResume(void);
+
+#define MAX_AVP_DMA_CHANNELS 3
+
+// DMA capabilities -- these currently do not vary between chips
+
+// Maximum dma transfer size for one transfer.
+#define DMA_MAX_TRANSFER_SIZE 0x10000
+
+// Address alignment requirement for the dma buffer address
+#define DMA_ADDRESS_ALIGNMENT 4
+
+// Transfer size alignment for the dma transfer.
+#define DMA_TRANSFER_SIZE_ALIGNMENT 4
+
+// Dma transfer request depth for initial req depth
+#define DMA_TRANSFER_REQ_DEPTH 16
+
+// The end index of the list
+#define DMA_NULL_INDEX 0xFFFF
+
+// Defines the dma request states (lifecycle of one queued transfer).
+typedef enum
+{
+ // The request has not been started.
+ RmDmaRequestState_NotStarted = 0x1,
+
+ // The request is in the running state.
+ RmDmaRequestState_Running ,
+
+ // The request is in the completed state.
+ RmDmaRequestState_Completed ,
+
+ // The request is in the stopped state.
+ RmDmaRequestState_Stopped,
+
+ // The request is in the unused state.
+ RmDmaRequestState_Unused,
+
+ // Forces the enum to 32 bits.
+ RmDmaRequestState_Force32 = 0x7FFFFFFF
+} RmDmaRequestState;
+
+// Defines the dma channel allocation state.
+typedef enum
+{
+ // Dma channel is free and available for the allocation.
+ RmDmaChannelState_Free = 0x1,
+
+ // The dma channel has been freed by the client but it still has
+ // outstanding data transfer requests queued; it becomes Free once
+ // the queue drains.
+ RmDmaChannelState_MarkedFree ,
+
+ // Dma channel is used by the client.
+ RmDmaChannelState_Used,
+
+ // Forces the enum to 32 bits.
+ RmDmaChannelState_Force32 = 0x7FFFFFFF
+} RmDmaChannelState;
+
+// Defines the dma channel transfer mode and property (bit flags, OR-able).
+typedef enum
+{
+ // initial value of the states.
+ RmDmaTransferMode_Init = 0x0,
+
+ // Dma channel transfer mode is continuous.
+ RmDmaTransferMode_Continuous = 0x1,
+
+ // Dma channel transfer mode is Double buffering.
+ RmDmaTransferMode_DoubleBuff = 0x2,
+
+ // Dma channel transfer mode is to transfer the same buffer again and again.
+ RmDmaTransferMode_SameBuff = 0x4,
+
+ // Dma channel transfer where source address is the Xmb address.
+ RmDmaTransferMode_SourceXmb = 0x8,
+
+ // Dma channel transfer where source address is the Peripheral address.
+ RmDmaTransferMode_SourcePeripheral = 0x10,
+
+ // Dma channel transfer request is asynchronous.
+ RmDmaTransferMode_Asynch = 0x20,
+
+ // Dma channel is currently waiting on the ping (half-buffer) interrupt
+ // of a continuous transfer; toggled in OnDmaCompleteInContinuousMode.
+ RmDmaTransferMode_PingIntMode = 0x40,
+
+ // Forces the enum to 32 bits.
+ RmDmaTransferMode_Force32 = 0x7FFFFFFF
+} RmDmaTransferMode;
+
+/**
+ * Combines the Dma transfer request information which will be queued and
+ * require to start the transfer and for notification after transfer completes.
+ * Entries live in a channel's pTransReqList array and are linked into either
+ * the request queue or the free list via NextIndex/PrevIndex.
+ */
+typedef struct DmaTransReqRec
+{
+ // Unique Id
+ NvU32 UniqueId;
+
+ // Current state of the channel.
+ RmDmaRequestState State;
+
+ // The dma request transfer mode and details of the request.
+ RmDmaTransferMode TransferMode;
+
+ // The Source address for the data transfer.
+ NvRmPhysAddr SourceAdd;
+
+ // The destination address for the data transfer.
+ NvRmPhysAddr DestAdd;
+
+ // The source address wrapping.
+ NvU32 SourceAddWrap;
+
+ // The destination address wrapping.
+ NvU32 DestAddWrap;
+
+ // Number of bytes requested.
+ NvU32 BytesRequested;
+
+ // Number of bytes programmed for current data transfer.
+ NvU32 BytesCurrProgram;
+
+ // Number of bytes remaining to transfer.
+ NvU32 BytesRemaining;
+
+ // The configuration of dma in terms of register content and channel
+ // register info.
+ DmaChanRegisters DmaChanRegs;
+
+ // Semaphore Id which need to be signalled after completion.
+ NvOsSemaphoreHandle hOnDmaCompleteSema;
+
+ // Semaphore Id which need to be signalled after half of the transfer
+ // completion.
+ NvOsSemaphoreHandle hOnHalfDmaCompleteSema;
+
+ // Semaphore Id which need to be destroyed when new request will be placed
+ // by this list memory.
+ NvOsSemaphoreHandle hLastReqSema;
+
+ // Array based the double link list (index into pTransReqList,
+ // DMA_NULL_INDEX terminates).
+ NvU16 NextIndex;
+
+ NvU16 PrevIndex;
+
+} DmaTransReq;
+
+/**
+ * Combines the channel information, status, requestor information for the
+ * channel dma, type of dma etc.
+ */
+typedef struct RmDmaChannelRec
+{
+ // State of the channel.
+ RmDmaChannelState ChannelState;
+
+ // Dma priority whether this is low priority channel or high priority
+ // channel.
+ NvRmDmaPriority Priority;
+
+ // Pointer to the list of the transfer request.
+ struct DmaTransReqRec *pTransReqList;
+
+ // Currently maximum request possible.
+ NvU16 MaxReqList;
+
+ // Head index to the request
+ NvU16 HeadReqIndex;
+
+ // Tail Index to the request
+ NvU16 TailReqIndex;
+
+ // Head index to the free list.
+ NvU16 HeadFreeIndex;
+
+ // Mutex to provide the thread/interrupt safety for the channel specific
+ // data.
+ NvOsIntrMutexHandle hIntrMutex;
+
+ // The virtual base address of the channel registers.
+ NvU32 *pVirtChannelAdd;
+
+ // Channel address bank size.
+ NvU32 ChannelAddBankSize;
+
+ // Pointer to the dma hw interface apis structure.
+ DmaHwInterface *pHwInterface;
+
+ // Log the last requested size
+ NvU32 LastReqSize;
+
+#if NVOS_IS_LINUX
+ // Channel interrupt handle (Linux registers one handler per channel).
+ NvOsInterruptHandle hIntrHandle;
+#endif
+
+} RmDmaChannel, *RmDmaChannelHandle;
+
+/**
+ * Combines the dma information
+ */
+typedef struct
+{
+ // Device handle.
+ NvRmDeviceHandle hDevice;
+
+ // Actual numbers of Apb dma channels available on the soc.
+ NvU32 NumApbDmaChannels;
+
+ // Array of per-channel records (NumApbDmaChannels entries).
+ RmDmaChannel *pListApbDmaChannel;
+
+ // Apb Dma General registers
+ DmaGenRegisters ApbDmaGenReg;
+
+ // OS mutex for channel allocation and deallocation: provide thread safety
+ NvOsMutexHandle hDmaAllocMutex;
+} NvRmPrivDmaInfo;
+
+/**
+ * Combines the Dma requestor and related information which is required for
+ * other dma operation request.
+ */
+typedef struct NvRmDmaRec
+{
+ // Store the Rm device handle
+ NvRmDeviceHandle hRmDevice;
+
+ // Corresponding dma channel pointer to APB dma for this handle.
+ RmDmaChannel *pDmaChannel;
+
+ // Flag that tells whether 32 bit swap is enabled or not.
+ NvBool IsBitSwapEnable;
+
+ // Unique Id
+ NvU32 UniqueId;
+
+ // Dma requestor module Id.
+ NvRmDmaModuleID DmaReqModuleId;
+
+ // dma requestor instance Id.
+ NvU32 DmaReqInstId;
+
+ // Dma register information which contain the configuration for dma when it
+ // was allocated
+ DmaChanRegisters DmaChRegs;
+
+ // NvOs semaphore which will be used when synchronous operation is requested.
+ NvOsSemaphoreHandle hSyncSema;
+} NvRmDma;
+
+// Module-wide dma bookkeeping: channel list, general registers, alloc mutex.
+static NvRmPrivDmaInfo s_DmaInfo;
+// Hw access function table for the Apb dma controller.
+static DmaHwInterface s_ApbDmaInterface;
+#if !NVOS_IS_LINUX
+// Single interrupt handle shared by all channels on non-Linux builds.
+static NvOsInterruptHandle s_ApbDmaInterruptHandle = NULL;
+#endif
+
+/**
+ * Deinitialize the apb dma physical/virtual addresses. This function will
+ * unmap the virtual mapping created by InitDmaGeneralHwRegsAddress().
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static void DeInitDmaGeneralHwRegsAddress(void)
+{
+ // Unmap the virtual mapping for apb general register.
+ NvRmPhysicalMemUnmap(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
+ s_DmaInfo.ApbDmaGenReg.GenAddBankSize);
+ s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd = NULL;
+}
+
+/**
+ * Initialize the apb dma physical/virtual addresses. This function will get
+ * the physical address of Apb dma channel from Nvrm module APIs, get the
+ * virtual address.
+ * Returns the NvRmPhysicalMemMap() result; on failure pGenVirtBaseAdd
+ * stays NULL.
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static NvError InitDmaGeneralHwRegsAddress(void)
+{
+ NvError Error = NvSuccess;
+ NvRmDeviceHandle hDevice = NULL;
+ NvRmModuleID ModuleId;
+ NvRmPhysAddr ApbPhysAddr;
+
+ // Required the valid device handles.
+ hDevice = s_DmaInfo.hDevice;
+
+ // Get the physical base address of the apb dma controller general register.
+ ModuleId = NVRM_MODULE_ID(NvRmPrivModuleID_ApbDma, 0);
+ NvRmModuleGetBaseAddress(hDevice, ModuleId,
+ &ApbPhysAddr, &s_DmaInfo.ApbDmaGenReg.GenAddBankSize);
+
+ // Initialize the apb dma register virtual address.
+ s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd = NULL;
+
+ // Get the virtual address of apb dma general base address.
+ // Registers must be mapped uncached.
+ Error = NvRmPhysicalMemMap(ApbPhysAddr,
+ s_DmaInfo.ApbDmaGenReg.GenAddBankSize, NVOS_MEM_READ_WRITE,
+ NvOsMemAttribute_Uncached,
+ (void **)&s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd);
+
+ return Error;
+}
+
+/**
+ * Grow pDmaChannel's request list by MoreListSize entries.
+ * Existing queued requests are copied into the new, larger array and the
+ * newly added tail entries are linked together as the free list.
+ * Returns NvError_InsufficientMemory if the new array cannot be allocated
+ * (the old list is left untouched in that case).
+ *
+ * NOTE(review): TotalReqSize is not checked against DMA_NULL_INDEX (0xFFFF),
+ * so repeated growth could in theory overflow the NvU16 index space —
+ * confirm an upper bound is enforced by callers.
+ * NOTE(review): any free entries remaining in the old list are discarded;
+ * presumably this is only called when the free list is empty — verify.
+ */
+static NvError AllocateReqList(RmDmaChannel *pDmaChannel, NvU16 MoreListSize)
+{
+ NvU16 Index;
+ DmaTransReq *pTransReqList = NULL;
+ DmaTransReq *pExistTransReqList = pDmaChannel->pTransReqList;
+ NvU32 TotalReqSize = (pDmaChannel->MaxReqList + MoreListSize);
+
+ // Allocate the memory for logging the client requests.
+ pTransReqList = NvOsAlloc(TotalReqSize * sizeof(DmaTransReq));
+ if (!pTransReqList)
+ return NvError_InsufficientMemory;
+
+ NvOsMemset(pTransReqList, 0, TotalReqSize * sizeof(DmaTransReq));
+
+ // Copy the existing request if it exist to the new allocated request list.
+ if (pExistTransReqList)
+ {
+ NvOsMemcpy(pTransReqList, pExistTransReqList,
+ pDmaChannel->MaxReqList * sizeof(DmaTransReq));
+ NvOsFree(pExistTransReqList);
+ }
+
+ // Link the new entries [old MaxReqList, TotalReqSize) into a doubly
+ // linked free list; the first new entry has no predecessor.
+ for (Index = pDmaChannel->MaxReqList; Index < TotalReqSize; ++Index)
+ {
+ if (Index == pDmaChannel->MaxReqList)
+ pTransReqList[pDmaChannel->MaxReqList].PrevIndex = DMA_NULL_INDEX;
+ else
+ pTransReqList[Index].PrevIndex = Index-1;
+
+ pTransReqList[Index].NextIndex = Index + 1;
+ }
+ pTransReqList[Index-1].NextIndex = DMA_NULL_INDEX;
+ pDmaChannel->pTransReqList = pTransReqList;
+ pDmaChannel->HeadFreeIndex = pDmaChannel->MaxReqList;
+ pDmaChannel->MaxReqList += MoreListSize;
+ return NvSuccess;
+}
+
+/**
+ * Deinitialize the Apb dma channels. It will free all the memory and resource
+ * allocated for the dma channels.
+ * Safe to call with a NULL list (no-op).
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static void DeInitDmaChannels(RmDmaChannel *pDmaList, NvU32 TotalChannel)
+{
+ NvU32 i;
+ if (!pDmaList)
+ return;
+
+ for (i = 0; i < TotalChannel; i++)
+ {
+ // &pDmaList[i] can never be NULL here, so the old per-channel
+ // NULL check was dead code and has been removed.
+ RmDmaChannel *pDmaChannel = &pDmaList[i];
+
+ NvOsFree(pDmaChannel->pTransReqList);
+ pDmaChannel->MaxReqList = 0;
+
+ // Free the dma virtual mapping
+ NvRmPhysicalMemUnmap(pDmaChannel->pVirtChannelAdd,
+ pDmaChannel->ChannelAddBankSize);
+ NvOsIntrMutexDestroy(pDmaChannel->hIntrMutex);
+ }
+ NvOsFree(pDmaList);
+}
+
+/**
+ * Init Apb dma channels. It makes the list of all available dma channels and
+ * keeps them in the free channel list so that they will be available for
+ * allocation.
+ * Once a client asks for a dma channel, it will look in the free list, remove
+ * the channel from the free list, attach it to the dma handle and keep it in
+ * the used list. Client data transfer requests are queued per dma channel.
+ * On any failure, everything allocated so far is torn down via
+ * DeInitDmaChannels() and *pDmaChannelList is set to NULL.
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static NvError
+InitDmaChannels(
+ NvRmDeviceHandle hDevice,
+ RmDmaChannel **pDmaChannelList,
+ NvU32 TotalChannel,
+
+ NvRmModuleID DmaModuleId)
+{
+ NvU32 ChanIndex;
+ NvError Error = NvSuccess;
+ RmDmaChannel *pDmaChannel = NULL;
+ NvRmModuleID ModuleId = 0;
+ NvRmPhysAddr ChannelPhysAddr;
+ RmDmaChannel *pDmaList = NULL;
+
+ // Allocate the memory to store the all dma channel information.
+ pDmaList = NvOsAlloc(TotalChannel * sizeof(RmDmaChannel));
+ if (!pDmaList)
+ return NvError_InsufficientMemory;
+
+ // Initialize all dma channel structure with default values.
+ for (ChanIndex = 0; ChanIndex < TotalChannel; ++ChanIndex)
+ {
+ pDmaChannel = &pDmaList[ChanIndex];
+
+ // Initialize all channel member to the initial known states.
+ pDmaChannel->ChannelState = RmDmaChannelState_Free;
+ pDmaChannel->Priority = NvRmDmaPriority_High;
+ pDmaChannel->pTransReqList = NULL;
+ pDmaChannel->MaxReqList = 0;
+ pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
+ pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+ pDmaChannel->HeadFreeIndex = DMA_NULL_INDEX;
+ pDmaChannel->hIntrMutex = NULL;
+ pDmaChannel->pVirtChannelAdd = NULL;
+ pDmaChannel->ChannelAddBankSize = 0;
+ pDmaChannel->pHwInterface = &s_ApbDmaInterface;
+ }
+
+ // Allocate the resource and register address for each channels.
+ for (ChanIndex = 0; ChanIndex < TotalChannel; ++ChanIndex)
+ {
+ pDmaChannel = &pDmaList[ChanIndex];
+
+ // Allocate the memory for logging the client request.
+ Error = AllocateReqList(pDmaChannel, DMA_TRANSFER_REQ_DEPTH);
+
+ // Create mutex for the channel access.
+ if (!Error)
+ Error = NvOsIntrMutexCreate(&pDmaChannel->hIntrMutex);
+
+ // Initialize the base address of the channel.
+ if (!Error)
+ {
+ ModuleId = NVRM_MODULE_ID(DmaModuleId, ChanIndex);
+ NvRmModuleGetBaseAddress(hDevice, ModuleId, &ChannelPhysAddr,
+ &pDmaChannel->ChannelAddBankSize);
+ Error = NvRmPhysicalMemMap(ChannelPhysAddr,
+ pDmaChannel->ChannelAddBankSize, NVOS_MEM_READ_WRITE,
+ NvOsMemAttribute_Uncached,
+ (void **)&pDmaChannel->pVirtChannelAdd);
+ }
+ if (Error)
+ break;
+ }
+
+ if (!Error)
+ {
+ // Allocate last channel as a low priority request, others are
+ // high priority channel
+ // NOTE(review): all channels are initialized NvRmDmaPriority_High
+ // above; presumably priority is assigned at allocation time —
+ // confirm whether this comment is stale.
+ *pDmaChannelList = (RmDmaChannel *)pDmaList;
+ }
+ else
+ {
+ DeInitDmaChannels(pDmaList, TotalChannel);
+ *pDmaChannelList = (RmDmaChannel *)NULL;
+ }
+ return Error;
+}
+
+/**
+ * Initialize the Apb dma channels.
+ * Thread Safety: Caller responsibility.
+ */
+static NvError InitAllDmaChannels(void)
+{
+ // Build the apb dma channel list for the channels present on this soc.
+ return InitDmaChannels(s_DmaInfo.hDevice, &s_DmaInfo.pListApbDmaChannel,
+ s_DmaInfo.NumApbDmaChannels, NvRmPrivModuleID_ApbDmaChannel);
+}
+
+/**
+ * Deinitialize the Apb dma channels and clear the channel list pointer.
+ * Thread Safety: Caller responsibility.
+ */
+static void DeInitAllDmaChannels(void)
+{
+ // Deinitialize the apb dma channels.
+ DeInitDmaChannels(s_DmaInfo.pListApbDmaChannel, s_DmaInfo.NumApbDmaChannels);
+ s_DmaInfo.pListApbDmaChannel = NULL;
+}
+
+/**
+ * DeInitialize the Dmas. It includes the deinitialization of Apb dma channels:
+ * globally disable the dma, gate the dma clock, and destroy the list of all
+ * channels (which also unmaps the per-channel register addresses).
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static void DeInitDmas(void)
+{
+ // Global disable the dma channels.
+ s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
+ NV_FALSE);
+
+ // Disable the dma clocks.
+ // Disable clock for the apb dma channels.
+ (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice, NvRmPrivModuleID_ApbDma,
+ 0, NV_FALSE);
+
+ // De-Initialize of the dma channel lists.
+ DeInitAllDmaChannels();
+}
+
+/**
+ * Initialize the Dma. It includes the initialization of Apb dma channels:
+ * build the channel list, enable the module clock, reset the module, then
+ * globally enable the dma. On any failure the partially initialized state
+ * is rolled back via DeInitDmas().
+ *
+ * Thread Safety: Caller responsibility.
+ */
+static NvError InitDmas(NvRmDeviceHandle hRmDevice)
+{
+ NvError Error = NvSuccess;
+
+ // Initialize of the dma channel lists.
+ Error = InitAllDmaChannels();
+
+ // Enable the clocks of dma channels.
+ if (!Error)
+ Error = NvRmPowerModuleClockControl(hRmDevice, NvRmPrivModuleID_ApbDma,
+ 0, NV_TRUE);
+ // Reset the dma channels.
+ if (!Error)
+ NvRmModuleReset(hRmDevice, NVRM_MODULE_ID(NvRmPrivModuleID_ApbDma, 0));
+
+ // Global enable the dma channels.
+ if (!Error)
+ s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
+ NV_TRUE);
+
+ // If error exist then disable the dma clocks.
+ if (Error)
+ DeInitDmas();
+
+ return Error;
+}
+
+
+/**
+ * Continue the current transfer by sending the next chunk of the data from the
+ * current dma transfer request. This may be called when requested size is
+ * larger than the supported dma transfer size in single go by hw
+ * (DMA_MAX_TRANSFER_SIZE bytes).
+ *
+ * Takes the channel as void* so it can be used as the
+ * DmaContinueRemainingTransferFxn function pointer.
+ */
+static void ApbDmaContinueRemainingTransfer(void *pDmaChan)
+{
+ NvU32 CurrProgSize;
+ NvU32 LastTransferSize;
+ DmaTransReq *pCurrReq = NULL;
+ NvBool IsDoubleBuff;
+ NvBool IsContMode;
+ RmDmaChannel *pDmaChannel = (RmDmaChannel *)pDmaChan;
+
+ pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+
+ // Get the last transfer size in bytes from the start of the source and
+ // destination address
+ LastTransferSize = pCurrReq->BytesCurrProgram;
+
+ // Calculate the possible transfer size based on remaining bytes and
+ // maximum transfer size. Updates the remaining size, transfer size and
+ // programmed size accordingly.
+ CurrProgSize = NV_MIN(pCurrReq->BytesRemaining, DMA_MAX_TRANSFER_SIZE);
+
+ IsDoubleBuff = (pCurrReq->TransferMode & RmDmaTransferMode_DoubleBuff)? NV_TRUE: NV_FALSE;
+ IsContMode = (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)? NV_TRUE: NV_FALSE;
+
+ // Program the transfer size.
+ pDmaChannel->pHwInterface->DmaHwSetTransferSizeFxn(&pCurrReq->DmaChanRegs,
+ CurrProgSize, IsDoubleBuff);
+ // Restart the channel with the addresses advanced past the chunk that
+ // just finished.
+ pDmaChannel->pHwInterface->DmaHwStartTransferWithAddIncFxn(
+ &pCurrReq->DmaChanRegs, 0, LastTransferSize, IsContMode);
+
+ // Update the parameter which will be used in future.
+ pCurrReq->BytesRemaining -= CurrProgSize;
+ pCurrReq->BytesCurrProgram = CurrProgSize;
+}
+
+
+/**
+ * Handle the dma complete interrupt in once mode: continue a partially
+ * programmed request, or retire the head request (recycling its list entry
+ * onto the free list), start the next queued request if any, and signal the
+ * client's completion semaphore.
+ *
+ * Thread Safety: Caller responsibility (called with the channel intr mutex
+ * held from the isr).
+ */
+static void
+OnDmaCompleteInOnceMode(
+ RmDmaChannel *pDmaChannel,
+ DmaTransReq *pCurrReq)
+{
+ NvOsSemaphoreHandle hSignalSema = NULL;
+ NvU16 CurrHeadIndex;
+
+ pDmaChannel->pHwInterface->DmaHwAckNClearInterruptFxn(&pCurrReq->DmaChanRegs);
+
+ // The transfer was in running state.
+ // Check if there is data remaining to transfer or not from the
+ // current request. If there is bytes remaining for data transfer
+ // then continue the transfer.
+ if (pCurrReq->BytesRemaining)
+ {
+ pDmaChannel->pHwInterface->DmaContinueRemainingTransferFxn(pDmaChannel);
+ return;
+ }
+
+ pCurrReq->State = RmDmaRequestState_Completed;
+
+ // Store the semaphore which needs to be signalled.
+ hSignalSema = pCurrReq->hOnDmaCompleteSema;
+ pDmaChannel->LastReqSize = pCurrReq->BytesRequested;
+
+ // Free this index: unlink the head entry from the request queue and
+ // push it onto the free list.
+ CurrHeadIndex = pDmaChannel->HeadReqIndex;
+ pDmaChannel->HeadReqIndex = pDmaChannel->pTransReqList[CurrHeadIndex].NextIndex;
+ pDmaChannel->pTransReqList[CurrHeadIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+ pDmaChannel->HeadFreeIndex = CurrHeadIndex;
+ if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+ {
+ pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+
+ // If channel is marked as free by client then make this channel
+ // for next allocation.
+ if (pDmaChannel->ChannelState == RmDmaChannelState_MarkedFree)
+ pDmaChannel->ChannelState = RmDmaChannelState_Free;
+
+ // Notify the client for the data transfers completes.
+ if (hSignalSema)
+ NvOsSemaphoreSignal(hSignalSema);
+ return;
+ }
+ // Queue is non-empty: start the next pending request on the hw.
+ pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+ pCurrReq->State = RmDmaRequestState_Running;
+ pDmaChannel->pHwInterface->DmaHwStartTransferFxn(&pCurrReq->DmaChanRegs);
+
+ // Generate the notification for the current transfer completes.
+ if (hSignalSema)
+ NvOsSemaphoreSignal(hSignalSema);
+}
+
+/**
+ * Handle the dma complete interrupt for a channel running in continuous
+ * (double-buffered) mode. Alternates between the half-buffer ("ping") and
+ * full-buffer interrupts via RmDmaTransferMode_PingIntMode, advances the
+ * request queue, and signals the client's half/full completion semaphores.
+ *
+ * Thread Safety: Caller responsibility (called with the channel intr mutex
+ * held from the isr).
+ */
+static void
+OnDmaCompleteInContinuousMode(
+ RmDmaChannel *pDmaChannel,
+ DmaTransReq *pCurrReq)
+{
+ NvOsSemaphoreHandle hSignalSema = NULL;
+ NvU16 NextHeadIndex;
+ DmaTransReq *pNextReq = NULL;
+
+ pDmaChannel->pHwInterface->DmaHwAckNClearInterruptFxn(&pCurrReq->DmaChanRegs);
+
+ // The transfer was in running state.
+ // Check if there is data remaining to transfer or not from the
+ // current request. If there is bytes remaining for data transfer
+ // then continue the transfer.
+ if (pCurrReq->BytesRemaining)
+ {
+ if (pCurrReq->TransferMode & RmDmaTransferMode_PingIntMode)
+ {
+ pCurrReq->TransferMode &= ~RmDmaTransferMode_PingIntMode;
+ pDmaChannel->pHwInterface->DmaContinueRemainingTransferFxn(pDmaChannel);
+ }
+ else
+ {
+ pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
+ }
+ return;
+ }
+
+ NextHeadIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex;
+ if (NextHeadIndex != DMA_NULL_INDEX)
+ pNextReq = &pDmaChannel->pTransReqList[NextHeadIndex];
+
+ // Half-buffer interrupt: pre-program the next request (if any) and
+ // signal the half-completion semaphore.
+ if (pCurrReq->TransferMode & RmDmaTransferMode_PingIntMode)
+ {
+ if (NextHeadIndex != DMA_NULL_INDEX)
+ {
+ pDmaChannel->pHwInterface->DmaHwContinueTransferFxn(&pNextReq->DmaChanRegs);
+ pNextReq->State = RmDmaRequestState_Running;
+ pNextReq->TransferMode |= RmDmaTransferMode_PingIntMode;
+ }
+ pDmaChannel->pHwInterface->DmaHwAddTransferCountFxn(&pCurrReq->DmaChanRegs);
+
+ if (pCurrReq->hOnHalfDmaCompleteSema)
+ NvOsSemaphoreSignal(pCurrReq->hOnHalfDmaCompleteSema);
+
+
+ pCurrReq->TransferMode &= ~RmDmaTransferMode_PingIntMode;
+ return;
+ }
+
+ pCurrReq->State = RmDmaRequestState_Completed;
+
+ // Store the semaphore which needs to be signalled.
+ hSignalSema = pCurrReq->hOnDmaCompleteSema;
+
+ if (!pNextReq)
+ {
+ if (pCurrReq->TransferMode & RmDmaTransferMode_SameBuff)
+ {
+ // Same-buffer mode: keep looping on the same buffer forever.
+ pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
+ pCurrReq->State = RmDmaRequestState_Running;
+ if (hSignalSema)
+ NvOsSemaphoreSignal(pCurrReq->hOnDmaCompleteSema);
+ pDmaChannel->pHwInterface->DmaHwAddTransferCountFxn(&pCurrReq->DmaChanRegs);
+ return;
+ }
+ else
+ {
+ pDmaChannel->pHwInterface->DmaHwStopTransferFxn(&pCurrReq->DmaChanRegs);
+ pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
+ pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+
+ // If channel is marked as free then make this channel available
+ // for next allocation.
+ if (pDmaChannel->ChannelState == RmDmaChannelState_MarkedFree)
+ pDmaChannel->ChannelState = RmDmaChannelState_Free;
+ }
+ }
+ else
+ {
+ // Retire the head entry onto the free list and promote the next
+ // request to the head of the queue.
+ pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+ pDmaChannel->HeadFreeIndex = pDmaChannel->HeadReqIndex;
+ pDmaChannel->HeadReqIndex = NextHeadIndex;
+
+ // May be we got this request after ping buffer completion.
+ if (pNextReq->State != RmDmaRequestState_Running)
+ {
+ // Start the next request transfer.
+ pDmaChannel->pHwInterface->DmaHwContinueTransferFxn(&pNextReq->DmaChanRegs);
+ pNextReq->State = RmDmaRequestState_Running;
+ // NOTE(review): this sets PingIntMode on pCurrReq, which has
+ // already completed and been recycled above — it looks like it
+ // may have been intended for pNextReq; confirm.
+ pCurrReq->TransferMode |= RmDmaTransferMode_PingIntMode;
+ }
+ }
+
+ // Generate the notification for the current transfer completes.
+ if (hSignalSema)
+ NvOsSemaphoreSignal(hSignalSema);
+}
+
+
+
+#if NVOS_IS_LINUX
+/**
+ * Handle the Apb dma interrupt for one channel (Linux build: one handler
+ * instance is registered per channel, with that channel as the isr context).
+ * Dispatches to the continuous- or once-mode completion handler under the
+ * channel's intr mutex, then re-enables the interrupt via NvRmInterruptDone.
+ */
+static void ApbDmaIsr(void *args)
+{
+ RmDmaChannel *pDmaChannel = (RmDmaChannel *)args;
+ DmaTransReq *pCurrReq;
+ NvBool IsTransferComplete;
+
+ NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+ // Nothing queued: spurious or already-handled interrupt.
+ if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+ goto exit;
+
+ pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+ if (pCurrReq->State != RmDmaRequestState_Running)
+ goto exit;
+
+ IsTransferComplete = pDmaChannel->pHwInterface->DmaHwIsTransferCompletedFxn(
+ &pCurrReq->DmaChanRegs);
+ if (IsTransferComplete) {
+ if (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)
+ OnDmaCompleteInContinuousMode(pDmaChannel, pCurrReq);
+ else
+ OnDmaCompleteInOnceMode(pDmaChannel, pCurrReq);
+ }
+
+exit:
+ NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+ NvRmInterruptDone(pDmaChannel->hIntrHandle);
+}
+#else
+/**
+ * Handle the Apb dma interrupt (non-Linux build): a single handler scans
+ * every channel, dispatching each completed channel to the continuous- or
+ * once-mode completion handler under that channel's intr mutex.
+ */
+static void ApbDmaIsr(void *args)
+{
+ RmDmaChannel *pDmaChannel;
+ DmaTransReq *pCurrReq;
+ NvU32 ChanIndex;
+ NvBool IsTransferComplete;
+
+ for (ChanIndex = 0; ChanIndex < s_DmaInfo.NumApbDmaChannels; ++ChanIndex)
+ {
+ pDmaChannel = &s_DmaInfo.pListApbDmaChannel[ChanIndex];
+ // Quick unlocked check to skip idle channels; re-checked below
+ // under the channel mutex.
+ if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+ continue;
+
+ NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+ if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+ goto NextLoop;
+
+ pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+ if (pCurrReq->State != RmDmaRequestState_Running)
+ goto NextLoop;
+
+ IsTransferComplete = pDmaChannel->pHwInterface->DmaHwIsTransferCompletedFxn(
+ &pCurrReq->DmaChanRegs);
+ if (!IsTransferComplete)
+ goto NextLoop;
+
+ if (pCurrReq->TransferMode & RmDmaTransferMode_Continuous)
+ OnDmaCompleteInContinuousMode(pDmaChannel, pCurrReq);
+ else
+ OnDmaCompleteInOnceMode(pDmaChannel, pCurrReq);
+
+ NextLoop:
+ NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+ }
+
+ NvRmInterruptDone(s_ApbDmaInterruptHandle);
+}
+#endif
+
+
+/**
+ * Register apb Dma interrupt(s). On Linux one handler is registered per
+ * channel; otherwise a single handler services the APB DMA sub-interrupt
+ * controller. Channel interrupts are disabled while registering and
+ * re-enabled only if every registration succeeded.
+ * Returns NvSuccess or the first registration error encountered.
+ */
+static NvError RegisterAllDmaInterrupt(NvRmDeviceHandle hDevice)
+{
+ NvRmModuleID ModuleId = NvRmPrivModuleID_ApbDma;
+ NvError Error = NvSuccess;
+ NvOsInterruptHandler DmaIntHandler = ApbDmaIsr;
+ NvU32 Irq = 0;
+ NvU32 i;
+
+ /* Disable interrupts for all channels */
+ for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
+ {
+ NvRmPrivDmaInterruptEnable(hDevice, i, NV_FALSE);
+ }
+
+#if NVOS_IS_LINUX
+ /* Register the same interrupt handler for all APB DMA channels. */
+ for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
+ {
+ Irq = NvRmGetIrqForLogicalInterrupt(hDevice, ModuleId, i);
+ Error = NvRmInterruptRegister(hDevice, 1, &Irq,
+ &DmaIntHandler, &s_DmaInfo.pListApbDmaChannel[i],
+ &(s_DmaInfo.pListApbDmaChannel[i].hIntrHandle), NV_TRUE);
+ // Stop on the first failure; previously a mid-loop error could be
+ // overwritten by a later successful registration and get lost.
+ if (Error != NvSuccess)
+ break;
+ }
+#else
+ /* Register one interrupt handler for all APB DMA channels
+ * Pass index 0xFF to get the main IRQ of the ADB DMA sub-interrupt
+ * controller. */
+ Irq = NvRmGetIrqForLogicalInterrupt(hDevice, ModuleId, 0xFF);
+ Error = NvRmInterruptRegister(hDevice, 1, &Irq,
+ &DmaIntHandler, hDevice, &s_ApbDmaInterruptHandle, NV_TRUE);
+
+#endif
+
+ if (Error != NvSuccess) return Error;
+
+ /* Enable interrupts for all channels */
+ for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
+ {
+ NvRmPrivDmaInterruptEnable(hDevice, i, NV_TRUE);
+ }
+ return Error;
+}
+
+/**
+ * Unregister apb Dma interrupts: the per-channel handles on Linux, or the
+ * single shared handle otherwise.
+ */
+static void UnregisterAllDmaInterrupt(NvRmDeviceHandle hDevice)
+{
+#if NVOS_IS_LINUX
+ // NvU32 (not int) to match NumApbDmaChannels and avoid a
+ // signed/unsigned comparison.
+ NvU32 i;
+
+ for (i=0; i < s_DmaInfo.NumApbDmaChannels; i++)
+ {
+ NvRmInterruptUnregister(hDevice, s_DmaInfo.pListApbDmaChannel[i].hIntrHandle);
+ }
+#else
+ NvRmInterruptUnregister(hDevice, s_ApbDmaInterruptHandle);
+ s_ApbDmaInterruptHandle = NULL;
+#endif
+}
+
+/**
+ * Destroy the global DMA bookkeeping state, releasing all memory and OS
+ * resources allocated by CreateDmaInfo. Teardown runs in reverse order of
+ * creation: interrupts, channels, allocation mutex, register mapping.
+ *
+ * NOTE(review): also invoked by CreateDmaInfo on a partial-init failure, so
+ * some stages may never have been initialized — presumably the DeInit
+ * helpers tolerate that; confirm. Behavior while a transfer request is
+ * still outstanding is likewise an open question (see original PENDING).
+ */
+static void DestroyDmaInfo(void)
+{
+    // Unregister the dma interrupt handler(s).
+    UnregisterAllDmaInterrupt(s_DmaInfo.hDevice);
+
+    // Deinitialize all dma channels.
+    DeInitDmas();
+
+    // Destroy the channel-allocation mutex; NULL it so a later destroy
+    // (e.g. from an error path) is harmless.
+    NvOsMutexDestroy(s_DmaInfo.hDmaAllocMutex);
+    s_DmaInfo.hDmaAllocMutex = NULL;
+
+    // Unmap the dma hw register addresses.
+    DeInitDmaGeneralHwRegsAddress();
+}
+
+/**
+ * Create the global DMA state and bring the channels to a known initial
+ * state: map the hw register addresses, create the channel-allocation
+ * mutex, initialize the channels, and register the interrupts — in that
+ * order, each stage gated on the success of the previous one.
+ *
+ * On any failure the already-created resources are released via
+ * DestroyDmaInfo and the error is returned.
+ *
+ * @param hDevice RM device handle; cached in s_DmaInfo for later use.
+ * @return NvSuccess, or the error from the first failing stage.
+ */
+static NvError CreateDmaInfo(NvRmDeviceHandle hDevice)
+{
+    NvError Error = NvSuccess;
+
+    s_DmaInfo.hDevice = hDevice;
+    s_DmaInfo.NumApbDmaChannels =
+        NvRmModuleGetNumInstances(hDevice, NvRmPrivModuleID_ApbDmaChannel);
+
+    NV_ASSERT(s_DmaInfo.NumApbDmaChannels > 0);
+    NV_ASSERT(s_DmaInfo.NumApbDmaChannels <= MAX_APB_DMA_CHANNELS);
+
+    // Map the dma hw register addresses.
+    Error = InitDmaGeneralHwRegsAddress();
+
+    // Create the channel-allocation mutex.
+    if (!Error)
+        Error = NvOsMutexCreate(&s_DmaInfo.hDmaAllocMutex);
+
+    // Initialize the dma channels.
+    if (!Error)
+        Error = InitDmas(hDevice);
+
+    // Register the dma interrupts.
+    if (!Error)
+        Error = RegisterAllDmaInterrupt(hDevice);
+
+    // NOTE(review): DestroyDmaInfo tears down every stage even if an early
+    // stage failed and later ones never ran — verify the DeInit helpers
+    // are safe to call on uninitialized state.
+    if (Error)
+        DestroyDmaInfo();
+    return Error;
+}
+
+/**
+ * Kick off the transfer at the head of the channel's request queue:
+ * mark that request running and program the hardware from its saved
+ * register shadow.
+ * Thread Safety: the caller must hold the channel lock.
+ */
+static void StartDmaTransfer(RmDmaChannel *pDmaChannel)
+{
+    DmaTransReq *pHeadReq;
+
+    pHeadReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+    pHeadReq->State = RmDmaRequestState_Running;
+    pDmaChannel->pHwInterface->DmaHwStartTransferFxn(&pHeadReq->DmaChanRegs);
+}
+
+/**
+ * Stop the transfer currently running on the channel, if any, and mark
+ * the head request as stopped. No-op when the queue is empty or the head
+ * request is not running.
+ *
+ * Thread Safety: the caller must hold the channel lock.
+ */
+static void StopDmaTransfer(RmDmaChannel *pDmaChannel)
+{
+    DmaTransReq *pHeadReq;
+
+    // Nothing queued: nothing to stop.
+    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+        return;
+
+    pHeadReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex];
+    if (pHeadReq->State != RmDmaRequestState_Running)
+        return;
+
+    pDmaChannel->pHwInterface->DmaHwStopTransferFxn(&pHeadReq->DmaChanRegs);
+    pHeadReq->State = RmDmaRequestState_Stopped;
+}
+
+/**
+ * Select the special (continuous / double-buffered) transfer mode for
+ * requestors that need it: I2S and SPDIF get continuous double-buffering;
+ * UART gets it only for receive (peripheral-as-source), reusing the same
+ * buffer, with the half-buffer semaphore aliased to the completion one.
+ * Low-priority channels never use special modes.
+ */
+static void
+SetApbDmaSpecialTransferMode(
+    NvRmDmaHandle hDma,
+    NvBool IsSourceAddPerip,
+    DmaTransReq *pCurrReq)
+{
+    RmDmaChannel *pChannel = hDma->pDmaChannel;
+    NvBool IsI2sLike;
+    NvBool IsUartRx;
+
+    // Special dma transfer modes are not supported on low priority channels.
+    if (pChannel->Priority == NvRmDmaPriority_Low)
+        return;
+
+    IsI2sLike = (hDma->DmaReqModuleId == NvRmDmaModuleID_I2s) ||
+                (hDma->DmaReqModuleId == NvRmDmaModuleID_Spdif);
+    IsUartRx = (hDma->DmaReqModuleId == NvRmDmaModuleID_Uart) &&
+               (IsSourceAddPerip);
+
+    if (IsI2sLike)
+    {
+        // Continuous double buffering; no half-buffer notification.
+        pCurrReq->TransferMode |= (RmDmaTransferMode_Continuous |
+                                    RmDmaTransferMode_DoubleBuff);
+        pCurrReq->hOnHalfDmaCompleteSema = NULL;
+    }
+    else if (IsUartRx)
+    {
+        // Continuous double transfer over the same buffer; signal the
+        // completion semaphore at the half point as well.
+        pCurrReq->TransferMode |= (RmDmaTransferMode_Continuous |
+                                    RmDmaTransferMode_DoubleBuff |
+                                    RmDmaTransferMode_SameBuff);
+        pCurrReq->hOnHalfDmaCompleteSema = pCurrReq->hOnDmaCompleteSema;
+    }
+    else
+    {
+        // All other requestors keep the default (once) mode.
+        return;
+    }
+
+    pChannel->pHwInterface->DmaHwSetTransferModeFxn(
+        &pCurrReq->DmaChanRegs, NV_TRUE, NV_TRUE);
+}
+
+/**
+ * Validate and record an APB DMA transfer request into the request
+ * structure, programming the shadow register set for:
+ *  - source/destination address validity (exactly one side must be a
+ *    peripheral address for APB DMA),
+ *  - address wrapping, transfer direction, optional destination bit swap,
+ *  - burst size and the first transfer-size chunk (requests larger than
+ *    DMA_MAX_TRANSFER_SIZE are split across multiple hw transfers).
+ *
+ * Thread Safety: not required; only touches the passed-in request.
+ *
+ * @param hDma         DMA handle carrying the channel and requestor info.
+ * @param pCurrRequest Opaque pointer to the DmaTransReq to configure.
+ * @return NvSuccess, or NvError_NotSupported when the address pair is not
+ *         exactly one peripheral + one memory address.
+ */
+static NvError LogApbDmaTransferRequest(NvRmDmaHandle hDma, void *pCurrRequest)
+{
+    NvBool IsSourceAddPerip;
+    NvBool IsDestAddPerip;
+    NvBool IsDoubleBuff;
+    DmaTransReq *pCurrReq = (DmaTransReq *)pCurrRequest;
+
+    // Determine which side of the transfer is the peripheral address.
+    IsSourceAddPerip = NvRmPrivDmaHwIsValidPeripheralAddress(pCurrReq->SourceAdd);
+    IsDestAddPerip = NvRmPrivDmaHwIsValidPeripheralAddress(pCurrReq->DestAdd);
+
+    // APB DMA requires exactly one peripheral address: reject mem->mem
+    // and perip->perip requests.
+    if (((IsSourceAddPerip == NV_TRUE) && (IsDestAddPerip == NV_TRUE)) ||
+        ((IsSourceAddPerip == NV_FALSE) && (IsDestAddPerip == NV_FALSE)))
+    {
+        return NvError_NotSupported;
+    }
+
+    if (IsSourceAddPerip)
+        pCurrReq->TransferMode |= RmDmaTransferMode_SourcePeripheral;
+
+    // Configure address wrapping of the dma registers as per the source
+    // and destination wrap sizes of this request.
+    hDma->pDmaChannel->pHwInterface->DmaHwSetAddressWrappingFxn(
+        &pCurrReq->DmaChanRegs, pCurrReq->SourceAddWrap,
+        pCurrReq->DestAddWrap, pCurrReq->BytesRequested,
+        IsSourceAddPerip);
+
+    // Configure the source and destination addresses.
+    hDma->pDmaChannel->pHwInterface->DmaHwConfigureAddressFxn(
+        &pCurrReq->DmaChanRegs, pCurrReq->SourceAdd,
+        pCurrReq->DestAdd, IsSourceAddPerip);
+
+    // Configure the transfer direction from which side is the peripheral.
+    hDma->pDmaChannel->pHwInterface->DmaHwSetDirectionFxn(&pCurrReq->DmaChanRegs,
+        IsSourceAddPerip);
+
+    // Asynchronous requests may qualify for continuous/double-buffered
+    // special modes (I2s, Spdif, Uart receive).
+    if (pCurrReq->TransferMode & RmDmaTransferMode_Asynch)
+        SetApbDmaSpecialTransferMode(hDma, IsSourceAddPerip, pCurrReq);
+
+    // Honor the client's byte-swap setting; swap applies to the
+    // destination side only.
+    if (hDma->IsBitSwapEnable)
+        hDma->pDmaChannel->pHwInterface->DmaHwEnableDestBitSwapFxn(
+            &pCurrReq->DmaChanRegs, IsDestAddPerip);
+
+    // Burst size is derived from the requested transfer size.
+    hDma->pDmaChannel->pHwInterface->DmaHwSetBurstSizeFxn(&pCurrReq->DmaChanRegs,
+        hDma->DmaReqModuleId, pCurrReq->BytesRequested);
+
+    // Program the first chunk; the remainder is continued from the ISR
+    // across as many hw transfers as needed.
+    pCurrReq->BytesCurrProgram = NV_MIN(pCurrReq->BytesRequested, DMA_MAX_TRANSFER_SIZE);
+    pCurrReq->BytesRemaining = pCurrReq->BytesRequested - pCurrReq->BytesCurrProgram;
+
+    IsDoubleBuff = (pCurrReq->TransferMode & RmDmaTransferMode_DoubleBuff)? NV_TRUE: NV_FALSE;
+    hDma->pDmaChannel->pHwInterface->DmaHwSetTransferSizeFxn(&pCurrReq->DmaChanRegs,
+        pCurrReq->BytesCurrProgram, IsDoubleBuff);
+    return NvSuccess;
+}
+
+
+/**
+ * Initialize the RM DMA driver: install the APB-DMA-specific callbacks,
+ * let the generic hw-interface setup fill in the rest, then build the
+ * global DMA bookkeeping state.
+ */
+NvError NvRmPrivDmaInit(NvRmDeviceHandle hDevice)
+{
+    // APB-specific entry points go in first; the generic init below
+    // populates the remaining function pointers.
+    s_ApbDmaInterface.DmaContinueRemainingTransferFxn =
+                                        ApbDmaContinueRemainingTransfer;
+    s_ApbDmaInterface.LogDmaTransferRequestFxn = LogApbDmaTransferRequest;
+    NvRmPrivDmaInitDmaHwInterfaces(&s_ApbDmaInterface);
+
+    // Allocate channels, mutexes and interrupts.
+    return CreateDmaInfo(hDevice);
+}
+
+/**
+ * Deinitialize the RM DMA driver and free all resources created by
+ * NvRmPrivDmaInit (interrupts, channels, mutexes, register mappings).
+ */
+void NvRmPrivDmaDeInit(void)
+{
+    DestroyDmaInfo();
+}
+
+
+/**
+ * Report the RmDma capabilities: the required buffer address alignment
+ * and the transfer-size granularity.
+ *
+ * @param hDevice    RM device handle (asserted non-NULL, otherwise unused).
+ * @param pRmDmaCaps Output capabilities structure.
+ * @return NvSuccess always.
+ */
+NvError
+NvRmDmaGetCapabilities(
+    NvRmDeviceHandle hDevice,
+    NvRmDmaCapabilities *pRmDmaCaps)
+{
+    NV_ASSERT(hDevice);
+    NV_ASSERT(pRmDmaCaps);
+
+    pRmDmaCaps->DmaGranularitySize = DMA_TRANSFER_SIZE_ALIGNMENT;
+    pRmDmaCaps->DmaAddressAlignmentSize = DMA_ADDRESS_ALIGNMENT;
+    return NvSuccess;
+}
+
+/**
+ * Allocate a dma handle.
+ *
+ * Implementation Details:
+ * A high-priority handle is bound to a free channel, which that client
+ * then owns exclusively; if no free channel exists the call fails with
+ * NvError_DmaChannelNotAvailable. A low-priority handle shares a
+ * low-priority channel with other low-priority clients (so it may see
+ * delayed service); allocation only fails on insufficient memory or if
+ * no matching channel is found.
+ *
+ * Thread safety: the channel table scan and state update are guarded by
+ * s_DmaInfo.hDmaAllocMutex.
+ *
+ * @param hRmDevice              RM device handle.
+ * @param phDma                  Receives the new handle (NULL on failure).
+ * @param Enable32bitSwap        Enable destination byte swapping.
+ * @param Priority               High (dedicated) or low (shared) priority.
+ * @param DmaRequestorModuleId   Requesting module; memory-to-memory
+ *                               (NvRmDmaModuleID_Memory) is rejected.
+ * @param DmaRequestorInstanceId Instance of the requesting module.
+ * @return NvSuccess, NvError_InvalidSourceId, NvError_InsufficientMemory,
+ *         or NvError_DmaChannelNotAvailable.
+ */
+NvError
+NvRmDmaAllocate(
+    NvRmDeviceHandle hRmDevice,
+    NvRmDmaHandle *phDma,
+    NvBool Enable32bitSwap,
+    NvRmDmaPriority Priority,
+    NvRmDmaModuleID DmaRequestorModuleId,
+    NvU32 DmaRequestorInstanceId)
+{
+    NvError Error = NvSuccess;
+
+    NvU32 UniqueId;
+    RmDmaChannel *pDmaChannel = NULL;
+    NvRmDmaHandle hNewDma = NULL;
+    NvU32 MaxChannel;
+    RmDmaChannel *pChannelList = NULL;
+    NvU32 ChanIndex;
+
+    NV_ASSERT(hRmDevice);
+    NV_ASSERT(phDma);
+
+    // Do not allow mem->mem DMAs, which use AHB DMA;
+    NV_ASSERT(DmaRequestorModuleId != NvRmDmaModuleID_Memory);
+
+    *phDma = NULL;
+
+    if ((DmaRequestorModuleId == NvRmDmaModuleID_Invalid) ||
+        (DmaRequestorModuleId >= NvRmDmaModuleID_Max))
+    {
+        return NvError_InvalidSourceId;
+    }
+
+    // Create a unique Id for this allocation from the requestor identity;
+    // used later to match queued requests back to their handle.
+    UniqueId = ((DmaRequestorModuleId << 24) | (DmaRequestorInstanceId << 16) |
+                    (NvRmDmaModuleID_Memory << 8));
+
+    // Allocate the memory for the new dma handle.
+    hNewDma = NvOsAlloc(sizeof(*hNewDma));
+
+    // If memory allocation fails then return error.
+    if (!hNewDma)
+        return NvError_InsufficientMemory;
+
+    // Zero-initialize the new handle.
+    NvOsMemset(hNewDma, 0, sizeof(*hNewDma));
+
+    // Record the requestor information for future reference.
+    hNewDma->DmaReqModuleId = DmaRequestorModuleId;
+    hNewDma->DmaReqInstId = DmaRequestorInstanceId;
+    hNewDma->IsBitSwapEnable = Enable32bitSwap;
+    hNewDma->hRmDevice = hRmDevice;
+    hNewDma->UniqueId = UniqueId;
+    hNewDma->pDmaChannel = NULL;
+    hNewDma->hSyncSema = NULL;
+
+    // Semaphore used for synchronous (blocking) transfers on this handle.
+    Error = NvOsSemaphoreCreate(&hNewDma->hSyncSema, 0);
+
+    // On error free the allocation and return.
+    if (Error)
+        goto ErrorExit;
+
+    // Pre-compute the channel configuration registers for this requestor.
+    s_ApbDmaInterface.DmaHwInitRegistersFxn(&hNewDma->DmaChRegs,
+                        DmaRequestorModuleId, DmaRequestorInstanceId);
+
+    // High priority: claim a free channel exclusively.
+    // Low priority: bind to a (shared) low-priority channel.
+    // The last MAX_AVP_DMA_CHANNELS channels are reserved for the AVP.
+    MaxChannel = s_DmaInfo.NumApbDmaChannels - MAX_AVP_DMA_CHANNELS;
+    pChannelList = s_DmaInfo.pListApbDmaChannel;
+
+    // The channel table is shared across threads; take the alloc mutex.
+    NvOsMutexLock(s_DmaInfo.hDmaAllocMutex);
+
+    for (ChanIndex = 0; ChanIndex < MaxChannel; ++ChanIndex)
+    {
+        pDmaChannel = &pChannelList[ChanIndex];
+        if ((Priority == pDmaChannel->Priority) && (pDmaChannel->ChannelState == RmDmaChannelState_Free))
+            break;
+        pDmaChannel = NULL;
+    }
+
+    // No matching channel found.
+    if (!pDmaChannel)
+    {
+        NvOsMutexUnlock(s_DmaInfo.hDmaAllocMutex);
+        Error = NvError_DmaChannelNotAvailable;
+        goto ErrorExit;
+    }
+
+    // A high-priority client owns the channel: mark it used. Low-priority
+    // channels remain Free so other low-priority clients can share them.
+    if (NvRmDmaPriority_High == Priority)
+        pDmaChannel->ChannelState = RmDmaChannelState_Used;
+
+    NvOsMutexUnlock(s_DmaInfo.hDmaAllocMutex);
+
+    // Attach the dma channel to the handle.
+    hNewDma->pDmaChannel = pDmaChannel;
+    hNewDma->DmaChRegs.pHwDmaChanReg = pDmaChannel->pVirtChannelAdd;
+
+    *phDma = hNewDma;
+    return Error;
+
+ErrorExit:
+    // NvOsSemaphoreDestroy(NULL) is safe on the pre-create failure path.
+    NvOsSemaphoreDestroy(hNewDma->hSyncSema);
+    NvOsFree(hNewDma);
+    return Error;
+}
+
+
+/**
+ * Free a dma handle previously returned by NvRmDmaAllocate.
+ *
+ * Implementation Details:
+ * For a high-priority handle the owned channel is returned to the free
+ * pool — immediately if its request queue is empty, otherwise it is
+ * marked MarkedFree and released once the last queued transfer finishes.
+ *
+ * For a low-priority handle only the handle itself is deleted; the
+ * shared channel is never owned by the client and stays available.
+ *
+ * Thread safety: the channel state change is done under the channel's
+ * interrupt mutex.
+ *
+ * @param hDma Handle to free; NULL is a no-op.
+ */
+void NvRmDmaFree(NvRmDmaHandle hDma)
+{
+    RmDmaChannel *pDmaChannel = NULL;
+
+    // NULL handle: nothing to do.
+    if (!hDma)
+        return;
+
+    // Get the channel this handle is bound to.
+    pDmaChannel = hDma->pDmaChannel;
+
+    // High priority: return the owned channel to the pool.
+    // Low priority: the channel is shared, only delete the handle.
+    if (NvRmDmaPriority_High == pDmaChannel->Priority)
+    {
+        // Thread safety: block new requests/ISR activity on this channel.
+        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+
+        // With transfers still queued, defer the release: mark the channel
+        // and let the completion path free it after the last transfer.
+        // With an empty queue, free the channel immediately so it is
+        // available for the next allocation.
+        if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
+            pDmaChannel->ChannelState = RmDmaChannelState_MarkedFree;
+        else
+        {
+            // Queue empty: the channel can be reallocated right away.
+            pDmaChannel->ChannelState = RmDmaChannelState_Free;
+        }
+        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+    }
+
+    // Release the semaphore created for synchronous operation.
+    NvOsSemaphoreDestroy(hDma->hSyncSema);
+
+    // Free the handle itself.
+    NvOsFree(hDma);
+}
+
+
+/**
+ * Start a dma transfer, queueing the request if the channel is already
+ * busy. Supports both synchronous and asynchronous operation.
+ *
+ * Synchronous (WaitTimeoutInMS != 0): blocks until the transfer completes
+ * or the timeout expires, whichever comes first; on timeout the request
+ * is unlinked (and the hw stopped if it was running) and NvError_Timeout
+ * is returned.
+ *
+ * Asynchronous (WaitTimeoutInMS == 0): the request is queued, started if
+ * the channel is idle, and the call returns immediately. On completion
+ * the cloned AsynchSemaphoreId (if any) is signalled; synchronization is
+ * the caller's responsibility.
+ *
+ * Thread safety: provided internally via the channel's interrupt mutex.
+ *
+ * @param hDma              DMA handle from NvRmDmaAllocate.
+ * @param pClientBuffer     Source/destination addresses, wraps, and size.
+ * @param DmaDirection      Forward, or Reverse (addresses swapped).
+ * @param WaitTimeoutInMS   0 for async; otherwise the blocking timeout.
+ * @param AsynchSemaphoreId Optional completion semaphore for async mode.
+ * @return NvSuccess, NvError_Timeout, or an allocation/configuration error.
+ */
+
+NvError
+NvRmDmaStartDmaTransfer(
+    NvRmDmaHandle hDma,
+    NvRmDmaClientBuffer *pClientBuffer,
+    NvRmDmaDirection DmaDirection,
+    NvU32 WaitTimeoutInMS,
+    NvOsSemaphoreHandle AsynchSemaphoreId)
+{
+    DmaTransReq *pCurrReq = NULL;
+    RmDmaChannel *pDmaChannel = NULL;
+    NvOsSemaphoreHandle hOnCompleteSema = NULL;
+    NvOsSemaphoreHandle hClonedSemaphore = NULL;
+    NvError Error = NvSuccess;
+    NvU16 FreeIndex;
+    NvU16 PrevIndex;
+    NvU16 NextIndex;
+
+    NV_ASSERT(hDma);
+    NV_ASSERT(pClientBuffer);
+
+    // Get the dma channel bound to this handle.
+    pDmaChannel = hDma->pDmaChannel;
+
+    // Validate the source and destination address alignment.
+    NV_ASSERT(!(pClientBuffer->SourceBufferPhyAddress & (DMA_ADDRESS_ALIGNMENT-1)));
+    NV_ASSERT(!(pClientBuffer->DestinationBufferPhyAddress & (DMA_ADDRESS_ALIGNMENT-1)));
+
+    // Validate the transfer size granularity.
+    NV_ASSERT(!(pClientBuffer->TransferSize & (DMA_TRANSFER_SIZE_ALIGNMENT-1)));
+
+    // Choose the completion-notification semaphore: the handle's private
+    // sync semaphore for blocking calls, a clone of the caller's
+    // semaphore (if provided) for async calls.
+    if (WaitTimeoutInMS)
+    {
+        hOnCompleteSema = hDma->hSyncSema;
+    }
+    else
+    {
+        if (AsynchSemaphoreId)
+        {
+            Error = NvOsSemaphoreClone(AsynchSemaphoreId, &hClonedSemaphore);
+            if (Error)
+                return Error;
+            hOnCompleteSema = hClonedSemaphore;
+        }
+    }
+
+    NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+    if (pDmaChannel->HeadFreeIndex == DMA_NULL_INDEX)
+    {
+        Error = AllocateReqList(pDmaChannel, pDmaChannel->MaxReqList);
+        if (Error)
+            goto Exit;
+    }
+
+    pCurrReq = &pDmaChannel->pTransReqList[pDmaChannel->HeadFreeIndex];
+
+    // Delete the semaphore that was cloned by the previous user of this
+    // list node.
+    NvOsSemaphoreDestroy(pCurrReq->hLastReqSema);
+    pCurrReq->hLastReqSema = NULL;
+
+
+    // Fill in the request information.
+    pCurrReq->UniqueId = hDma->UniqueId;
+    pCurrReq->TransferMode = RmDmaTransferMode_PingIntMode;
+    pCurrReq->State = RmDmaRequestState_NotStarted;
+    pCurrReq->hOnDmaCompleteSema = hOnCompleteSema;
+    pCurrReq->hOnHalfDmaCompleteSema = NULL;
+
+    if (!WaitTimeoutInMS)
+        pCurrReq->TransferMode |= RmDmaTransferMode_Asynch;
+
+    // Reverse direction simply swaps source and destination.
+    if (DmaDirection == NvRmDmaDirection_Forward)
+    {
+        pCurrReq->SourceAdd = pClientBuffer->SourceBufferPhyAddress;
+        pCurrReq->DestAdd = pClientBuffer->DestinationBufferPhyAddress;
+        pCurrReq->SourceAddWrap = pClientBuffer->SourceAddressWrapSize;
+        pCurrReq->DestAddWrap = pClientBuffer->DestinationAddressWrapSize;
+    }
+    else
+    {
+        pCurrReq->SourceAdd = pClientBuffer->DestinationBufferPhyAddress;
+        pCurrReq->DestAdd = pClientBuffer->SourceBufferPhyAddress;
+        pCurrReq->SourceAddWrap = pClientBuffer->DestinationAddressWrapSize;
+        pCurrReq->DestAddWrap = pClientBuffer->SourceAddressWrapSize;
+    }
+
+    pCurrReq->BytesRequested = pClientBuffer->TransferSize;
+    pCurrReq->BytesCurrProgram = 0;
+    pCurrReq->BytesRemaining = 0;
+
+    // Seed the request's register shadow from the handle's precomputed
+    // per-requestor configuration.
+    pCurrReq->DmaChanRegs.ControlReg = hDma->DmaChRegs.ControlReg;
+    pCurrReq->DmaChanRegs.AhbSequenceReg = hDma->DmaChRegs.AhbSequenceReg;
+    pCurrReq->DmaChanRegs.ApbSequenceReg = hDma->DmaChRegs.ApbSequenceReg;
+    pCurrReq->DmaChanRegs.XmbSequenceReg = hDma->DmaChRegs.XmbSequenceReg;
+    pCurrReq->DmaChanRegs.pHwDmaChanReg = hDma->pDmaChannel->pVirtChannelAdd;
+
+
+    // Configure registers as per the current request.
+    Error = hDma->pDmaChannel->pHwInterface->LogDmaTransferRequestFxn(hDma, pCurrReq);
+    if (Error)
+        goto Exit;
+
+    // Move the node from the free list to the tail of the request list;
+    // start the hw immediately if the queue was empty.
+    FreeIndex = pDmaChannel->HeadFreeIndex;
+    pDmaChannel->HeadFreeIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadFreeIndex].NextIndex;
+
+    PrevIndex = pDmaChannel->TailReqIndex;
+    if (pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+    {
+        pDmaChannel->HeadReqIndex = FreeIndex;
+        pDmaChannel->TailReqIndex = FreeIndex;
+        pDmaChannel->pTransReqList[FreeIndex].NextIndex = DMA_NULL_INDEX;
+        StartDmaTransfer(pDmaChannel);
+    }
+    else
+    {
+        pDmaChannel->pTransReqList[pDmaChannel->TailReqIndex].NextIndex = FreeIndex;
+        pDmaChannel->pTransReqList[FreeIndex].NextIndex = DMA_NULL_INDEX;
+        pDmaChannel->pTransReqList[FreeIndex].PrevIndex = pDmaChannel->TailReqIndex;
+        pDmaChannel->TailReqIndex = FreeIndex;
+    }
+
+    // Asynchronous operation: record the cloned semaphore and return.
+    if (!WaitTimeoutInMS)
+    {
+        pCurrReq->hLastReqSema = hClonedSemaphore;
+        goto Exit;
+    }
+    NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+
+    // Not worrying about the wait error: the state of the request decides
+    // the status of the transfer.
+    (void)NvOsSemaphoreWaitTimeout(hOnCompleteSema, WaitTimeoutInMS);
+
+    // Re-lock the channel to inspect the request.
+    NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+
+    // NOTE(review): PrevIndex/FreeIndex were captured before the unlocked
+    // wait; if another thread queues onto this channel meanwhile, the
+    // NotStarted unlink below may use stale neighbors — confirm whether
+    // concurrent submitters on one channel are possible here.
+    switch (pCurrReq->State)
+    {
+        case RmDmaRequestState_NotStarted :
+            // Timed out before the hw ever started it: unlink the node
+            // and return it to the free list.
+            NextIndex = pDmaChannel->pTransReqList[FreeIndex].NextIndex;
+            pDmaChannel->pTransReqList[FreeIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+            pDmaChannel->HeadFreeIndex = FreeIndex;
+            if (PrevIndex == DMA_NULL_INDEX)
+            {
+                pDmaChannel->HeadReqIndex = NextIndex;
+                if (NextIndex == DMA_NULL_INDEX)
+                    pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+            }
+            else
+            {
+                pDmaChannel->pTransReqList[PrevIndex].NextIndex = NextIndex;
+                if (NextIndex != DMA_NULL_INDEX)
+                    pDmaChannel->pTransReqList[NextIndex].PrevIndex = PrevIndex;
+            }
+            Error = NvError_Timeout;
+            break;
+
+        case RmDmaRequestState_Running:
+            // Timed out while running: stop the hw, pop the head, and
+            // start the next queued request (if any).
+            StopDmaTransfer(pDmaChannel);
+            if (pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex
+                                                            == DMA_NULL_INDEX)
+            {
+                pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
+                pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+            }
+            else
+            {
+                pDmaChannel->HeadReqIndex = pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].NextIndex;
+            }
+            pDmaChannel->pTransReqList[FreeIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+            pDmaChannel->HeadFreeIndex = FreeIndex;
+
+            // If there are more requests then start the next one now.
+            if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
+                StartDmaTransfer(pDmaChannel);
+            Error = NvError_Timeout;
+            break;
+
+
+        case RmDmaRequestState_Completed:
+            // Transfer finished before the timeout.
+            Error = NvSuccess;
+            break;
+
+        default:
+            NV_ASSERT(!"Client Request is in the invalid state");
+            break;
+    }
+
+Exit:
+    NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+    if (Error)
+        NvOsSemaphoreDestroy(hClonedSemaphore);
+
+    return Error;
+}
+
+/**
+ * Immediately stop any transfer belonging to this handle and remove its
+ * requests from the channel queue, returning the nodes to the free list.
+ *
+ * High-priority handles own their channel, so the whole queue is purged.
+ * Low-priority handles share the channel: only requests whose UniqueId
+ * matches the handle are removed, and if the currently-running request
+ * was aborted the next surviving request is started.
+ *
+ * Thread safety: all queue manipulation is done under the channel's
+ * interrupt mutex.
+ *
+ * For queued async requests the caller-owned semaphore passed to
+ * NvRmDmaStartDmaTransfer is not destroyed here (only this driver's
+ * clone is); destroying the original remains the caller's responsibility.
+ *
+ * @param hDma Handle whose requests are to be aborted; NULL is a no-op.
+ */
+void NvRmDmaAbort(NvRmDmaHandle hDma)
+{
+    NvU16 ReqIndex;
+    NvU16 NextIndex;
+    NvU16 PrevIndex;
+    RmDmaChannel *pDmaChannel = NULL;
+    NvBool IsRequireToStart = NV_FALSE;
+
+    // NULL handle: nothing to abort.
+    if (!hDma)
+        return;
+
+    // Get the dma channel bound to this handle.
+    pDmaChannel = hDma->pDmaChannel;
+
+    // The purge strategy depends on the channel priority.
+    if (NvRmDmaPriority_High == pDmaChannel->Priority)
+    {
+        // Take the lock first so the hw stop and the queue teardown are
+        // atomic with respect to the ISR (every other StopDmaTransfer
+        // call site already runs under this mutex).
+        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+
+        // Stop the dma transfer.
+        StopDmaTransfer(pDmaChannel);
+
+        // Kill every queued request.
+        ReqIndex = pDmaChannel->HeadReqIndex;
+        while (ReqIndex != DMA_NULL_INDEX)
+        {
+            NextIndex = pDmaChannel->pTransReqList[ReqIndex].NextIndex;
+            if (pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema)
+            {
+                NvOsSemaphoreDestroy(pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema);
+                pDmaChannel->pTransReqList[ReqIndex].hLastReqSema = NULL;
+                pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema = NULL;
+            }
+
+            // Push the node onto the free list unconditionally. (The old
+            // guard on HeadFreeIndex left a stale NextIndex on the first
+            // node when the free list was empty, creating a cycle.)
+            pDmaChannel->pTransReqList[ReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+            pDmaChannel->HeadFreeIndex = ReqIndex;
+            ReqIndex = NextIndex;
+        }
+        pDmaChannel->HeadReqIndex = DMA_NULL_INDEX;
+        pDmaChannel->TailReqIndex = DMA_NULL_INDEX;
+
+        // Release the channel for other clients.
+        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+    }
+    else
+    {
+        // Lock the channel access mutex.
+        NvOsIntrMutexLock(pDmaChannel->hIntrMutex);
+
+        // If the running (head) request belongs to this handle, stop the
+        // hw and remember to restart with the next surviving request.
+        // Guard against an empty queue before dereferencing the head.
+        IsRequireToStart = NV_FALSE;
+        if ((pDmaChannel->HeadReqIndex != DMA_NULL_INDEX) &&
+            (pDmaChannel->pTransReqList[pDmaChannel->HeadReqIndex].UniqueId ==
+                                                                hDma->UniqueId))
+        {
+            // The request needs to be aborted, so stop the dma channel.
+            StopDmaTransfer(pDmaChannel);
+            IsRequireToStart = NV_TRUE;
+        }
+
+        // Walk the queue, unlinking every request owned by this handle.
+        ReqIndex = pDmaChannel->HeadReqIndex;
+        PrevIndex = DMA_NULL_INDEX;
+        while (ReqIndex != DMA_NULL_INDEX)
+        {
+            NextIndex = pDmaChannel->pTransReqList[ReqIndex].NextIndex;
+            if (pDmaChannel->pTransReqList[ReqIndex].UniqueId == hDma->UniqueId)
+            {
+                if (pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema)
+                {
+                    NvOsSemaphoreDestroy(pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema);
+                    pDmaChannel->pTransReqList[ReqIndex].hLastReqSema = NULL;
+                    pDmaChannel->pTransReqList[ReqIndex].hOnDmaCompleteSema = NULL;
+                }
+                if (PrevIndex != DMA_NULL_INDEX)
+                    pDmaChannel->pTransReqList[PrevIndex].NextIndex = NextIndex;
+
+                if (NextIndex == DMA_NULL_INDEX)
+                    pDmaChannel->TailReqIndex = PrevIndex;
+                else
+                    pDmaChannel->pTransReqList[NextIndex].PrevIndex = PrevIndex;
+                pDmaChannel->pTransReqList[ReqIndex].NextIndex = pDmaChannel->HeadFreeIndex;
+                pDmaChannel->HeadFreeIndex = ReqIndex;
+            }
+            PrevIndex = ReqIndex;
+            if (pDmaChannel->HeadReqIndex == ReqIndex)
+                pDmaChannel->HeadReqIndex = NextIndex;
+            ReqIndex = NextIndex;
+        }
+        if (pDmaChannel->HeadReqIndex != DMA_NULL_INDEX)
+        {
+            if (IsRequireToStart)
+                StartDmaTransfer(pDmaChannel);
+        }
+        // Unlock the channel access mutex.
+        NvOsIntrMutexUnlock(pDmaChannel->hIntrMutex);
+    }
+}
+
+#define DEBUG_GET_COUNT 0
+/**
+ * Query how many bytes the current (head) request has transferred.
+ *
+ * With an empty queue the size of the last completed request is returned
+ * (with NvSuccess). Otherwise the head request must be Running or
+ * Stopped, else NvError_InvalidState is returned.
+ *
+ * @param hDma           DMA handle to query.
+ * @param pTransferCount Receives the transferred byte count.
+ * @param IsTransferStop NV_TRUE: stop a running transfer after sampling
+ *                       the count. NV_FALSE: restart a stopped transfer
+ *                       (reporting 0) or just sample a running one.
+ * @return NvSuccess or NvError_InvalidState.
+ */
+NvError NvRmDmaGetTransferredCount(
+    NvRmDmaHandle hDma,
+    NvU32 *pTransferCount,
+    NvBool IsTransferStop )
+{
+    DmaTransReq *pCurrReq = NULL;
+    NvError Error = NvSuccess;
+#if DEBUG_GET_COUNT
+    NvBool IsPrint = NV_TRUE;
+#endif
+
+    NV_ASSERT(hDma);
+    NV_ASSERT(pTransferCount);
+
+    NvOsIntrMutexLock(hDma->pDmaChannel->hIntrMutex);
+
+    // Empty queue: report the size of the last completed request.
+    // (ErrorExit is also the success exit path; Error stays NvSuccess.)
+    if (hDma->pDmaChannel->HeadReqIndex == DMA_NULL_INDEX)
+    {
+        *pTransferCount = hDma->pDmaChannel->LastReqSize;
+#if DEBUG_GET_COUNT
+        NvOsDebugPrintf("RmDmaGetTransCount ERROR1\n");
+#endif
+        goto ErrorExit;
+    }
+
+    // The count is only meaningful for a running or stopped head request.
+    pCurrReq = &hDma->pDmaChannel->pTransReqList[hDma->pDmaChannel->HeadReqIndex];
+    if ((pCurrReq->State != RmDmaRequestState_Running) &&
+        (pCurrReq->State != RmDmaRequestState_Stopped))
+    {
+        Error = NvError_InvalidState;
+#if DEBUG_GET_COUNT
+        NvOsDebugPrintf("RmDmaGetTransCount ERROR\n");
+#endif
+        goto ErrorExit;
+    }
+
+    if (IsTransferStop)
+    {
+        // Sample-and-stop: read the count with the stop flag, then halt
+        // the channel and mark the request stopped.
+        if (pCurrReq->State == RmDmaRequestState_Running)
+        {
+            *pTransferCount = hDma->pDmaChannel->pHwInterface->DmaHwGetTransferredCountWithStopFxn(
+                                        &pCurrReq->DmaChanRegs, NV_TRUE);
+            pCurrReq->State = RmDmaRequestState_Stopped;
+            hDma->pDmaChannel->pHwInterface->DmaHwStopTransferFxn(&pCurrReq->DmaChanRegs);
+        }
+        else
+        {
+            // Already stopped: just read the count.
+            *pTransferCount = hDma->pDmaChannel->pHwInterface->DmaHwGetTransferredCountFxn(
+                                        &pCurrReq->DmaChanRegs);
+        }
+    }
+    else
+    {
+        // Restart a stopped transfer (count resets to 0), or sample a
+        // running one without disturbing it.
+        if (pCurrReq->State == RmDmaRequestState_Stopped)
+        {
+            pCurrReq->State = RmDmaRequestState_Running;
+            hDma->pDmaChannel->pHwInterface->DmaHwStartTransferFxn(&pCurrReq->DmaChanRegs);
+            *pTransferCount = 0;
+#if DEBUG_GET_COUNT
+            IsPrint = NV_FALSE;
+#endif
+        }
+        else
+        {
+            *pTransferCount = hDma->pDmaChannel->pHwInterface->DmaHwGetTransferredCountFxn(
+                                        &pCurrReq->DmaChanRegs);
+        }
+    }
+
+#if DEBUG_GET_COUNT
+    NvOsDebugPrintf("RmDmaGetTransCount() TransferCount 0x%08x \n", *pTransferCount);
+#endif
+
+ErrorExit:
+    NvOsIntrMutexUnlock(hDma->pDmaChannel->hIntrMutex);
+    return Error;
+}
+
+/**
+ * Query completion of half/full buffer — not supported in the OS-level
+ * driver; asserts in debug builds and returns NV_FALSE.
+ */
+NvBool NvRmDmaIsDmaTransferCompletes(
+    NvRmDmaHandle hDma,
+    NvBool IsFirstHalfBuffer)
+{
+    // Silence unused-parameter warnings; the API is intentionally a stub.
+    (void)hDma;
+    (void)IsFirstHalfBuffer;
+
+    // This API is not supported in the os level driver.
+    NV_ASSERT(0);
+    return NV_FALSE;
+}
+
+
+/**
+ * Suspend the DMA controller: globally disable the channels (while the
+ * module clock is still running), then gate the clock.
+ */
+NvError NvRmPrivDmaSuspend(void)
+{
+    // Globally disable the dma channels first, while the module clock is
+    // still on and register writes can land.
+    s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
+        NV_FALSE);
+    // Then gate the module clock.
+    (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice, NvRmPrivModuleID_ApbDma,
+        0, NV_FALSE);
+    return NvSuccess;
+}
+
+/**
+ * Resume the DMA controller: ungate the module clock first, then globally
+ * re-enable the channels. (Mirror of NvRmPrivDmaSuspend — the global
+ * enable register must not be written while the clock is gated.)
+ */
+NvError NvRmPrivDmaResume(void)
+{
+    // Ungate the module clock before touching dma registers.
+    (void)NvRmPowerModuleClockControl(s_DmaInfo.hDevice, NvRmPrivModuleID_ApbDma,
+        0, NV_TRUE);
+    // Globally enable the dma channels.
+    s_ApbDmaInterface.DmaHwGlobalSetFxn(s_DmaInfo.ApbDmaGenReg.pGenVirtBaseAdd,
+        NV_TRUE);
+    return NvSuccess;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio.c b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio.c
new file mode 100644
index 000000000000..e84792848a59
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "ap15/ap15rm_gpio_vi.h"
+#include "nvrm_gpio_private.h"
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_pinmux_utils.h"
+#include "ap15/arapbpm.h"
+#include "nvodm_gpio_ext.h"
+
+// Treats GPIO pin handle releases like the pin is completely invalidated:
+// returned to SFIO state and tristated. See the FIXME comment below
+// to see why this isn't enabled currently...
+#define RELEASE_IS_INVALIDATE 1
+#define NV_ENABLE_GPIO_POWER_RAIL 1
+
+// Per-pin bookkeeping, indexed by absolute pin number
+// (pin + alphaPort * PinsPerPort). One entry per physical GPIO pin.
+typedef struct NvRmGpioPinInfoRec {
+    NvBool used;              // NV_TRUE once the pin is configured to a non-Inactive mode
+    NvU32 port;               // port index within the controller instance
+    NvU32 inst;               // GPIO controller instance
+    NvU32 pin;                // pin index within the port
+    NvRmGpioPinMode mode;     // last mode applied via NvRmGpioConfigPins
+    /* Sets up a chain of pins associated by one semaphore. Useful to parse the
+     * pins when an interrupt is received. */
+    NvU32 nextPin;
+    NvU16 irqNumber;          // NVRM_IRQ_INVALID when no IRQ is assigned
+} NvRmGpioPinInfo;
+
+// Process-wide GPIO manager state; a single refcounted instance is shared
+// by all NvRmGpioOpen callers (see s_hGpio below).
+typedef struct NvRmGpioRec
+{
+    NvU32 RefCount;           // open count; freed when it drops to zero
+    NvRmDeviceHandle hRm;     // RM device used for all register access
+    NvRmGpioPinInfo *pPinInfo; // per-pin state array, one slot per physical pin
+    NvRmGpioCaps *caps;       // controller capabilities (instances/ports/pins)
+    // Shadow of the INT_LVL registers, one word per port; works around
+    // read-modify-write hazards (see Bug 359459 references below).
+    NvU32 *pIvlReg;
+} NvRmGpio;
+
+
+static NvRmGpioHandle s_hGpio = NULL;
+
+static NvOsMutexHandle s_GpioMutex = NULL;
+
+/*
+ * Opens (or takes another reference on) the process-wide GPIO manager.
+ *
+ * @param hRm    RM device handle used for all register access.
+ * @param phGpio Out: the singleton GPIO handle; NULL on failure.
+ *
+ * @return NvSuccess, NvError_InsufficientMemory, or a mutex/memmap error.
+ *
+ * Fixes over the original: all failure paths taken while s_GpioMutex is
+ * held now unlock it (the caps/memmap failures previously returned with
+ * the mutex locked); a pPinInfo allocation failure now reports
+ * NvError_InsufficientMemory instead of NvSuccess and no longer leaves
+ * *phGpio pointing at freed memory; the pIvlReg allocation is checked;
+ * a stale error code is no longer returned when default caps were
+ * supplied; s_hGpio is reset to NULL on every failure so a later open
+ * can retry cleanly.
+ */
+NvError
+NvRmGpioOpen(
+    NvRmDeviceHandle hRm,
+    NvRmGpioHandle* phGpio)
+{
+    NvError err = NvSuccess;
+    NvU32 total_pins;
+    NvU32 i;
+    NvU32 gpioShadowSize;
+    NvU32 gpioShadowPhysical;
+
+    NV_ASSERT(hRm);
+    NV_ASSERT(phGpio);
+
+    if (!s_GpioMutex)
+    {
+        err = NvOsMutexCreate(&s_GpioMutex);
+        if (err != NvSuccess)
+        {
+            *phGpio = NULL;
+            return err;
+        }
+    }
+
+    NvOsMutexLock(s_GpioMutex);
+    if (s_hGpio)
+    {
+        // Already open: just take another reference.
+        s_hGpio->RefCount++;
+        goto exit;
+    }
+
+    s_hGpio = (NvRmGpio *)NvOsAlloc(sizeof(NvRmGpio));
+    if (!s_hGpio)
+    {
+        err = NvError_InsufficientMemory;
+        goto exit;
+    }
+    NvOsMemset(s_hGpio, 0, sizeof(NvRmGpio));
+    s_hGpio->hRm = hRm;
+
+    err = NvRmGpioGetCapabilities(hRm, (void **)&(s_hGpio->caps));
+    if (err)
+    {
+        // Was a default supplied?
+        if (s_hGpio->caps == NULL)
+        {
+            goto cleanup;
+        }
+        // A usable default was supplied; don't report the stale failure.
+        err = NvSuccess;
+    }
+
+    total_pins = s_hGpio->caps->Instances * s_hGpio->caps->PortsPerInstances *
+        s_hGpio->caps->PinsPerPort;
+
+    s_hGpio->pPinInfo = NvOsAlloc(sizeof(NvRmGpioPinInfo) * total_pins);
+    if (s_hGpio->pPinInfo == NULL)
+    {
+        err = NvError_InsufficientMemory;
+        goto cleanup;
+    }
+    NvOsMemset(s_hGpio->pPinInfo, 0, sizeof(NvRmGpioPinInfo) * total_pins);
+    for (i=0; i<total_pins; i++)
+    {
+        s_hGpio->pPinInfo[i].irqNumber = NVRM_IRQ_INVALID;
+    }
+
+    gpioShadowSize = sizeof(NvU32) * (NvU8)s_hGpio->caps->PortsPerInstances * (NvU8)s_hGpio->caps->Instances;
+    gpioShadowPhysical = NV_REGR(hRm, NvRmModuleID_Pmif, 0, APBDEV_PMC_SCRATCH19_0);
+    /* Hack. There is no need for shadow on AP20 */
+    if (hRm->ChipId.Id == 0x20 || !gpioShadowPhysical)
+    {
+        s_hGpio->pIvlReg = NvOsAlloc(gpioShadowSize);
+        if (s_hGpio->pIvlReg == NULL)
+        {
+            err = NvError_InsufficientMemory;
+            goto cleanup;
+        }
+        NvOsMemset(s_hGpio->pIvlReg, 0, gpioShadowSize);
+    }
+    else
+    {
+        /* Map the shadow region that the OAL is using by reading the physical
+         * address stored in PMC scratch register */
+        err = NvOsPhysicalMemMap(gpioShadowPhysical, gpioShadowSize,
+                  NvOsMemAttribute_Uncached,
+                  NVOS_MEM_READ_WRITE,
+                  (void **)&(s_hGpio->pIvlReg));
+        if (err != NvSuccess)
+        {
+            goto cleanup;
+        }
+    }
+    s_hGpio->RefCount++;
+    goto exit;
+
+cleanup:
+    // Failure after the singleton was allocated: tear everything down so
+    // a later NvRmGpioOpen() starts from scratch.
+    NvOsFree(s_hGpio->pPinInfo);
+    NvOsFree(s_hGpio);
+    s_hGpio = NULL;
+exit:
+    *phGpio = s_hGpio;    // NULL on failure
+    NvOsMutexUnlock(s_GpioMutex);
+    return err;
+}
+
+/*
+ * Drops one reference on the GPIO manager and tears the singleton down
+ * when the last reference goes away. Safe to call with a NULL handle.
+ */
+void NvRmGpioClose(NvRmGpioHandle hGpio)
+{
+    if (!hGpio)
+        return;
+
+    NV_ASSERT(hGpio->RefCount);
+
+    NvOsMutexLock(s_GpioMutex);
+    if (--hGpio->RefCount == 0)
+    {
+        NvU32 shadowBytes;
+        NvU32 shadowPhys;
+
+        NvOsFree(s_hGpio->pPinInfo);
+        shadowBytes = sizeof(NvU32) * s_hGpio->caps->PortsPerInstances * s_hGpio->caps->Instances;
+        shadowPhys = NV_REGR(hGpio->hRm, NvRmModuleID_Pmif, 0, APBDEV_PMC_SCRATCH19_0);
+        // Release the INT_LVL shadow the same way NvRmGpioOpen() obtained
+        // it: locally allocated on AP20 / no-scratch systems, otherwise a
+        // mapping of the OAL-owned region.
+        if (hGpio->hRm->ChipId.Id == 0x20 || !shadowPhys)
+        {
+            NvOsFree(s_hGpio->pIvlReg);
+        }
+        else
+        {
+            NvOsPhysicalMemUnmap(s_hGpio->pIvlReg, shadowBytes);
+        }
+        NvOsFree(s_hGpio);
+        s_hGpio = NULL;
+    }
+    NvOsMutexUnlock(s_GpioMutex);
+}
+
+
+/*
+ * Builds an opaque pin handle for (port, pinNumber).
+ *
+ * Three address spaces are encoded: the dedicated camera port, external
+ * (ODM, off-chip) ports, and on-chip ports where `port` is the absolute
+ * alphabetic port number ('A' == 0) split into instance (port >> 2) and
+ * port-within-instance (port & 0x3).
+ *
+ * Fix over the original: the range check used `>` on both limits, so the
+ * first out-of-range port (port == MaxPorts) and pin
+ * (pinNumber == PinsPerPort) slipped through the debug assertion; valid
+ * indices are 0..N-1, so the checks must be `>=`.
+ *
+ * @return NvSuccess always (invalid ranges only trip a debug assert,
+ *         preserving the original release-build behavior).
+ */
+NvError
+NvRmGpioAcquirePinHandle(
+    NvRmGpioHandle hGpio,
+    NvU32 port,
+    NvU32 pinNumber,
+    NvRmGpioPinHandle *phPin)
+{
+    NvU32 MaxPorts;
+
+    NV_ASSERT(hGpio != NULL);
+
+    NvOsMutexLock(s_GpioMutex);
+    if (port == NVRM_GPIO_CAMERA_PORT)
+    {
+        // The Camera has dedicated gpio pins that must be controlled
+        // through a non-standard gpio port control.
+        NvRmPrivGpioViAcquirePinHandle(hGpio->hRm, pinNumber);
+        *phPin = GPIO_MAKE_PIN_HANDLE(NVRM_GPIO_CAMERA_INST, port, pinNumber);
+    }
+    else if ((port >= NVODM_GPIO_EXT_PORT_0) &&
+             (port <= NVODM_GPIO_EXT_PORT_F))
+    {
+        // Create a pin handle for GPIOs that are
+        // sourced by external (off-chip) peripherals
+        *phPin = GPIO_MAKE_PIN_HANDLE((port & 0xFF), port, pinNumber);
+    }
+    else
+    {
+        NV_ASSERT(4 == hGpio->caps->PortsPerInstances);
+        MaxPorts = hGpio->caps->Instances * 4;
+
+        // Valid ranges are [0, MaxPorts) and [0, PinsPerPort).
+        if ((port >= MaxPorts) || (pinNumber >= hGpio->caps->PinsPerPort))
+        {
+            NV_ASSERT(!" Illegal port or pin number. ");
+        }
+
+        *phPin = GPIO_MAKE_PIN_HANDLE(port >> 2, port & 0x3, pinNumber);
+    }
+    NvOsMutexUnlock(s_GpioMutex);
+    return NvSuccess;
+}
+
+// Releases an array of pin handles. Camera pins go through the dedicated
+// VI release path; external (ODM) pins need no action. For on-chip pins
+// that are still marked in use, warn and (with RELEASE_IS_INVALIDATE)
+// return the pin to SFIO state, tristate it, and clear the used flag.
+void NvRmGpioReleasePinHandles(
+        NvRmGpioHandle hGpio,
+        NvRmGpioPinHandle *hPin,
+        NvU32 pinCount)
+{
+    NvU32 i;
+    NvU32 port;
+    NvU32 pin;
+    NvU32 instance;
+
+    if (hPin == NULL) return;
+
+    for (i=0; i<pinCount; i++)
+    {
+        instance = GET_INSTANCE(hPin[i]);
+        port = GET_PORT(hPin[i]);
+        pin = GET_PIN(hPin[i]);
+
+        // Lock per iteration so other GPIO calls can interleave between pins.
+        NvOsMutexLock(s_GpioMutex);
+        if (port == NVRM_GPIO_CAMERA_PORT)
+        {
+            NvRmPrivGpioViReleasePinHandles(hGpio->hRm, pin);
+        }
+        else if ((port >= NVODM_GPIO_EXT_PORT_0) &&
+            (port <= NVODM_GPIO_EXT_PORT_F))
+        {
+            // Do nothing for now...
+        }
+        else
+        {
+            NvU32 alphaPort;
+
+            // Absolute (alphabetic) port number, used to index pPinInfo.
+            alphaPort = instance * s_hGpio->caps->PortsPerInstances + port;
+            if (s_hGpio->pPinInfo[pin + alphaPort * s_hGpio->caps->PinsPerPort].used)
+            {
+                NV_DEBUG_PRINTF(("Warning: Releasing in-use GPIO pin handle GPIO_P%c.%02u (%c=%u)\n",
+                   'A'+alphaPort,pin,'A'+alphaPort, alphaPort));
+#if RELEASE_IS_INVALIDATE
+                GPIO_MASKED_WRITE(hGpio->hRm, instance, port, CNF, pin, 0);
+                NvRmSetGpioTristate(hGpio->hRm, alphaPort, pin, NV_TRUE);
+                s_hGpio->pPinInfo[pin + alphaPort*s_hGpio->caps->PinsPerPort].used = NV_FALSE;
+#endif
+            }
+        }
+        NvOsMutexUnlock(s_GpioMutex);
+    }
+
+    return;
+}
+
+
+/*
+ * Reads the current state of each pin in hPin[] into pPinState[].
+ * Camera and external (ODM) ports are routed to their dedicated
+ * accessors; for on-chip ports, an output pin is read back from OUT and
+ * an input pin from IN (decided by the OE register).
+ */
+void NvRmGpioReadPins(
+    NvRmGpioHandle hGpio,
+    NvRmGpioPinHandle *hPin,
+    NvRmGpioPinState *pPinState,
+    NvU32 pinCount )
+{
+    NvU32 idx;
+
+    NV_ASSERT(hPin != NULL);
+    NV_ASSERT(hGpio != NULL);
+    NV_ASSERT(hGpio->caps != NULL);
+
+    for (idx = 0; idx < pinCount; idx++)
+    {
+        NvU32 PortNum = GET_PORT(hPin[idx]);
+        NvU32 PinNum = GET_PIN(hPin[idx]);
+        NvU32 InstNum = GET_INSTANCE(hPin[idx]);
+        NvU32 Bits;
+
+        if (PortNum == NVRM_GPIO_CAMERA_PORT)
+        {
+            pPinState[idx] = NvRmPrivGpioViReadPins(hGpio->hRm, PinNum);
+            continue;
+        }
+        if ((PortNum >= (NvU32)NVODM_GPIO_EXT_PORT_0) &&
+            (PortNum <= (NvU32)NVODM_GPIO_EXT_PORT_F))
+        {
+            pPinState[idx] = NvOdmExternalGpioReadPins(PortNum, PinNum);
+            continue;
+        }
+
+        GPIO_REGR(hGpio->hRm, InstNum, PortNum, OE, Bits);
+        if (Bits & (1 << PinNum))
+        {
+            GPIO_REGR(hGpio->hRm, InstNum, PortNum, OUT, Bits);
+        }
+        else
+        {
+            GPIO_REGR(hGpio->hRm, InstNum, PortNum, IN, Bits);
+        }
+        pPinState[idx] = (Bits >> PinNum) & 0x1;
+    }
+}
+
+/*
+ * Drives each pin in hPin[] to the state given in pPinState[].
+ *
+ * Camera and external (ODM) ports are routed to their dedicated writers.
+ * For on-chip ports, consecutive handles that share the same instance and
+ * port are merged into a single atomic masked register write.
+ *
+ * Fix over the original: the run-merging condition was written as
+ * GET_PORT(hPin[i+1]==port) — the comparison ran first, so GET_PORT was
+ * applied to a 0/1 boolean and the "same port" test was effectively
+ * GET_PORT(0|1)==port. Pins from different ports could be merged into one
+ * register write (corrupting another port) and same-port runs could be
+ * split. Corrected to GET_PORT(hPin[i+1]) == port.
+ */
+void NvRmGpioWritePins(
+    NvRmGpioHandle hGpio,
+    NvRmGpioPinHandle *hPin,
+    NvRmGpioPinState *pPinState,
+    NvU32 pinCount )
+{
+    NvU32 inst;
+    NvU32 port;
+    NvU32 pin;
+    NvU32 i;
+
+    NV_ASSERT(hPin != NULL);
+    NV_ASSERT(hGpio != NULL);
+    NV_ASSERT(hGpio->caps != NULL);
+
+    for (i=0; i<pinCount; i++)
+    {
+        inst = GET_INSTANCE(hPin[i]);
+        port = GET_PORT(hPin[i]);
+        pin = GET_PIN(hPin[i]);
+
+        if (port == NVRM_GPIO_CAMERA_PORT)
+        {
+            NvRmPrivGpioViWritePins(hGpio->hRm, pin, pPinState[i]);
+        }
+        else if ((port >= (NvU32)NVODM_GPIO_EXT_PORT_0) &&
+                 (port <= (NvU32)NVODM_GPIO_EXT_PORT_F))
+        {
+            NvOdmExternalGpioWritePins(port, pin, pPinState[i]);
+        }
+        else
+        {
+            // When updating a contiguous set of pins that are
+            // all located in the same port, merge the register
+            // write into a single atomic update.
+            NvU32 updateVec = 0;
+            updateVec = (1<<(pin + GPIO_PINS_PER_PORT));
+            updateVec |= ((pPinState[i] & 0x1)<<pin);
+            while ((i+1<pinCount) &&
+                   GET_INSTANCE(hPin[i+1])==inst &&
+                   GET_PORT(hPin[i+1])==port)
+            {
+                pin = GET_PIN(hPin[i+1]);
+                updateVec |= (1<<(pin + GPIO_PINS_PER_PORT));
+                updateVec |= ((pPinState[i+1]&0x1)<<pin);
+                i++;
+            }
+            NV_REGW(hGpio->hRm, NvRmPrivModuleID_Gpio, inst,
+                (port*NV_GPIO_PORT_REG_SIZE)+GPIO_MSK_CNF_0+GPIO_OUT_0,
+                updateVec);
+        }
+    }
+
+    return;
+}
+
+
+// Configures each pin in hPin[] to the given Mode: records the mode in the
+// per-pin table, programs the hardware (CNF/OE/INT_LVL/INT_ENB/INT_CLR),
+// and manages pad-group tristate and IO-rail power on inactive<->active
+// transitions. Camera-port pins only support output and take no register
+// programming here. Returns the last NvRmGpioIoPowerConfig result (or
+// NvSuccess when no power transition happened).
+NvError NvRmGpioConfigPins(
+    NvRmGpioHandle hGpio,
+    NvRmGpioPinHandle *hPin,
+    NvU32 pinCount,
+    NvRmGpioPinMode Mode)
+{
+    NvError err = NvSuccess;
+    NvU32 i;
+    NvU32 inst;
+    NvU32 port;
+    NvU32 pin;
+    NvU32 pinNumber;
+    NvU32 Reg;
+    NvU32 alphaPort;
+
+    NvOsMutexLock(s_GpioMutex);
+
+    for (i=0; i< pinCount; i++)
+    {
+        inst = GET_INSTANCE(hPin[i]);
+        port = GET_PORT(hPin[i]);
+        pin = GET_PIN(hPin[i]);
+
+        if (port == NVRM_GPIO_CAMERA_PORT)
+        {
+            // If they are trying to do the wrong thing, assert.
+            // If they are trying to do the only allowed thing,
+            // quietly skip it, as nothing needs to be done.
+            if (Mode != NvOdmGpioPinMode_Output)
+            {
+                NV_ASSERT(!"Only output is supported for camera gpio.\n");
+            }
+            continue;
+        }
+
+        /* Absolute pin number to index into pPinInfo array and the alphabetic port names. */
+        alphaPort = inst * s_hGpio->caps->PortsPerInstances + port;
+        pinNumber = pin + alphaPort * s_hGpio->caps->PinsPerPort;
+
+        s_hGpio->pPinInfo[pinNumber].mode = Mode;
+        s_hGpio->pPinInfo[pinNumber].inst = inst;
+        s_hGpio->pPinInfo[pinNumber].port = port;
+        s_hGpio->pPinInfo[pinNumber].pin = pin;
+
+        /* Don't try to collapse this switch as the ordering of the register
+         * writes matters. */
+        switch (Mode)
+        {
+            case NvRmGpioPinMode_Output:
+                // Direction first, then hand the pad to GPIO (CNF=1).
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 1);
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+
+                break;
+
+            case NvRmGpioPinMode_InputData:
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+
+                break;
+
+            case NvRmGpioPinMode_InputInterruptLow:
+                // Level-triggered, active low (INT_LVL bit = 0).
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, INT_LVL, pin, 0);
+                break;
+
+            case NvRmGpioPinMode_InputInterruptHigh:
+                // Level-triggered, active high (INT_LVL bit = 1).
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, INT_LVL, pin, 1);
+                break;
+
+            case NvRmGpioPinMode_InputInterruptAny:
+                if(hGpio->caps->Features & NVRM_GPIO_CAP_FEAT_EDGE_INTR)
+                {
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+
+                    // EDGE/DELTA fields have no masked-write aliases, so
+                    // merge hardware bits with the software shadow before
+                    // the full-register write. see the # Bug ID: 359459
+                    GPIO_REGR(hGpio->hRm, inst, port, INT_LVL, Reg);
+                    Reg = (Reg & GPIO_INT_LVL_UNSHADOWED_MASK) |
+                          (s_hGpio->pIvlReg[alphaPort] & GPIO_INT_LVL_SHADOWED_MASK);
+                    Reg |= (GPIO_INT_LVL_0_EDGE_0_FIELD << pin);
+                    Reg |= (GPIO_INT_LVL_0_DELTA_0_FIELD << pin);
+                    s_hGpio->pIvlReg[alphaPort] = Reg;
+                    GPIO_REGW(hGpio->hRm, inst, port, INT_LVL, Reg);
+                }
+                else
+                {
+                    NV_ASSERT(!"Not supported");
+                }
+
+                break;
+
+            case NvRmGpioPinMode_Function:
+                // Return the pad to SFIO (pinmux-controlled) use.
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 0);
+                break;
+            case NvRmGpioPinMode_Inactive:
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, INT_ENB, pin, 0);
+                GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 0);
+                break;
+            case NvRmGpioPinMode_InputInterruptRisingEdge:
+                if(hGpio->caps->Features & NVRM_GPIO_CAP_FEAT_EDGE_INTR)
+                {
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+                    // Drop any latched event before re-arming the edge.
+                    GPIO_REGW(hGpio->hRm, inst, port, INT_CLR, (1 << pin));
+                    GPIO_REGR(hGpio->hRm, inst, port, INT_LVL, Reg);
+                    // see the # Bug ID: 359459
+                    Reg = (Reg & GPIO_INT_LVL_UNSHADOWED_MASK) | (s_hGpio->pIvlReg[alphaPort] & GPIO_INT_LVL_SHADOWED_MASK);
+                    Reg |= (GPIO_INT_LVL_0_BIT_0_FIELD << pin);
+                    Reg |= (GPIO_INT_LVL_0_EDGE_0_FIELD << pin);
+                    Reg &= ~(GPIO_INT_LVL_0_DELTA_0_FIELD << pin);
+                    s_hGpio->pIvlReg[alphaPort] = Reg;
+                    GPIO_REGW(hGpio->hRm, inst, port, INT_LVL, Reg);
+                }
+                else
+                {
+                    NV_ASSERT(!"Not supported");
+                }
+                break;
+            case NvRmGpioPinMode_InputInterruptFallingEdge:
+                if(hGpio->caps->Features & NVRM_GPIO_CAP_FEAT_EDGE_INTR)
+                {
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, OE, pin, 0);
+                    GPIO_MASKED_WRITE(hGpio->hRm, inst, port, CNF, pin, 1);
+                    GPIO_REGW(hGpio->hRm, inst, port, INT_CLR, (1 << pin));
+                    GPIO_REGR(hGpio->hRm, inst, port, INT_LVL, Reg);
+                    // see the # Bug ID: 359459
+                    Reg = (Reg & GPIO_INT_LVL_UNSHADOWED_MASK) |(s_hGpio->pIvlReg[alphaPort] & GPIO_INT_LVL_SHADOWED_MASK);
+                    Reg &= ~(GPIO_INT_LVL_0_BIT_0_FIELD << pin);
+                    Reg |= (GPIO_INT_LVL_0_EDGE_0_FIELD << pin);
+                    Reg &= ~(GPIO_INT_LVL_0_DELTA_0_FIELD << pin);
+                    s_hGpio->pIvlReg[alphaPort] = Reg;
+                    GPIO_REGW(hGpio->hRm, inst, port, INT_LVL, Reg);
+                }
+                else
+                {
+                    NV_ASSERT(!"Not supported");
+                }
+                break;
+            default:
+                NV_ASSERT(!"Invalid gpio mode");
+                break;
+        }
+
+        /* Pad group global tristates are only modified when the pin transitions
+         * from an inactive state to an active one. Active-to-active and
+         * inactive-to-inactive transitions are ignored */
+        if ((!s_hGpio->pPinInfo[pinNumber].used) && (Mode!=NvRmGpioPinMode_Inactive))
+        {
+#if NV_ENABLE_GPIO_POWER_RAIL
+            err = NvRmGpioIoPowerConfig(hGpio->hRm, alphaPort, pin, NV_TRUE);
+#endif
+            NvRmSetGpioTristate(hGpio->hRm, alphaPort, pin, NV_FALSE);
+        }
+        else if ((s_hGpio->pPinInfo[pinNumber].used) && (Mode==NvRmGpioPinMode_Inactive))
+        {
+#if NV_ENABLE_GPIO_POWER_RAIL
+            err = NvRmGpioIoPowerConfig(hGpio->hRm, alphaPort, pin, NV_FALSE);
+#endif
+            NvRmSetGpioTristate(hGpio->hRm, alphaPort, pin, NV_TRUE);
+        }
+        if (Mode != NvRmGpioPinMode_Inactive)
+            s_hGpio->pPinInfo[pinNumber].used = NV_TRUE;
+        else
+            s_hGpio->pPinInfo[pinNumber].used = NV_FALSE;
+    }
+
+    NvOsMutexUnlock(s_GpioMutex);
+    return err;
+}
+
+/*
+ * Translates an array of GPIO pin handles into the logical IRQ numbers
+ * used by the RM interrupt layer; one IRQ is produced per pin handle.
+ */
+NvError NvRmGpioGetIrqs(
+    NvRmDeviceHandle hRmDevice,
+    NvRmGpioPinHandle * hPin,
+    NvU32 * Irq,
+    NvU32 pinCount )
+{
+    NvU32 idx;
+
+    for (idx = 0; idx < pinCount; idx++)
+    {
+        NvU32 PortNum = GET_PORT(hPin[idx]);
+        NvU32 PinNum = GET_PIN(hPin[idx]);
+        NvU32 InstNum = GET_INSTANCE(hPin[idx]);
+
+        // The per-instance interrupt index is the pin's flat position
+        // within the instance: pin + port * pins-per-port.
+        Irq[idx] = NvRmGetIrqForLogicalInterrupt(hRmDevice,
+                       NVRM_MODULE_ID(NvRmPrivModuleID_Gpio, InstNum),
+                       PinNum + PortNum * GPIO_PINS_PER_PORT);
+    }
+    return NvSuccess;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.c
new file mode 100644
index 000000000000..361413dec1a1
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_gpio_private.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+// AP15-class GPIO capabilities: 6 controller instances, 4 ports each,
+// GPIO_PINS_PER_PORT pins per port, edge-interrupt capable.
+static NvRmGpioCaps s_ap15_caps = {6, 4, GPIO_PINS_PER_PORT, NVRM_GPIO_CAP_FEAT_EDGE_INTR /* (SEE BUG# 366493) */};
+
+// Capability table matched against the GPIO module's hardware version.
+static NvRmModuleCapability s_capsArray[] = {
+    /* Major, minor, eco and caps structure */
+    { 2, 0, 0, &s_ap15_caps },
+};
+
+// Set once the PMU rail addresses below have been discovered.
+static NvBool s_GpioIoPowerInitialized = NV_FALSE;
+
+// GPIO IO power rails; PmuRailAddress (second field) is filled in lazily
+// by NvRmGpioIoPowerDiscover(). Order matters: NvRmGpioIoPowerConfig()
+// indexes this table by position (0=SYS, 1=BB, 2=VI, 3=SDIO, 4=LCD, 5=UART).
+static NvRmGpioIoPowerInfo s_GpioIoPowerTable[] =
+{
+    {NV_VDD_SYS_ODM_ID, 0},
+    {NV_VDD_BB_ODM_ID, 0},
+    {NV_VDD_VI_ODM_ID, 0},
+    {NV_VDD_SDIO_ODM_ID, 0},
+    {NV_VDD_LCD_ODM_ID, 0},
+    {NV_VDD_UART_ODM_ID, 0}
+};
+
+// GPIO wrapper for NvRmModuleGetCapabilities(). On lookup failure it
+// asserts (debug builds) and falls back to the AP15 defaults while still
+// returning the error; in both cases the instance count is refreshed from
+// the RM so *Capability reflects the actual number of controllers.
+NvError
+NvRmGpioGetCapabilities(
+    NvRmDeviceHandle hRm,
+    void **Capability )
+{
+    NvError err = NvSuccess;
+
+    NV_ASSERT(hRm);
+
+    err = NvRmModuleGetCapabilities(hRm, NvRmPrivModuleID_Gpio, s_capsArray,
+        NV_ARRAY_SIZE(s_capsArray), Capability);
+    if (err)
+    {
+        /* Default to AP15 caps.
+           FIXME: findout why the RM API is returning failure. */
+        NV_ASSERT(0);
+        *Capability = (void*)&s_ap15_caps;
+    }
+
+    ((NvRmGpioCaps *)*Capability)->Instances =
+        NvRmModuleGetNumInstances(hRm, NvRmPrivModuleID_Gpio);
+
+    return err;
+}
+
+/*
+ * Resolves the PMU rail address for every entry of s_GpioIoPowerTable via
+ * ODM peripheral discovery. Fails with NvError_NotSupported as soon as
+ * any rail has no connectivity entry.
+ */
+static NvError NvRmGpioIoPowerDiscover(
+    NvRmDeviceHandle hRm)
+{
+    NvU32 idx;
+
+    for (idx = 0; idx < NV_ARRAY_SIZE(s_GpioIoPowerTable); idx++)
+    {
+        const NvOdmPeripheralConnectivity* pConn =
+            NvOdmPeripheralGetGuid(s_GpioIoPowerTable[idx].PowerRailId);
+
+        if (pConn == NULL || pConn->NumAddress == 0)
+            return NvError_NotSupported;
+
+        s_GpioIoPowerTable[idx].PmuRailAddress = pConn->AddressList[0].Address;
+    }
+    return NvSuccess;
+}
+
+/*
+ * Powers the IO rail that feeds a given GPIO pad up or down.
+ *
+ * Maps (port, pinNumber) — port is the absolute alphabetic port index —
+ * onto one of the rails in s_GpioIoPowerTable (SYS/BB/VI/SDIO/LCD, with
+ * UART as the catch-all), lazily discovering PMU rail addresses on first
+ * use. Enable requests the rail's capability-reported voltage; disable
+ * requests ODM_VOLTAGE_OFF. Waits out any PMU-reported settling time.
+ *
+ * Fix over the original: SettlingTime was passed to NvRmPmuSetVoltage
+ * uninitialized and then read for the wait; if the PMU path does not
+ * write it, the final NvOsWaitUS consumed an indeterminate value. It is
+ * now zero-initialized so the worst case is "no wait".
+ *
+ * @return NvSuccess, or the rail-discovery error on first use.
+ */
+NvError NvRmGpioIoPowerConfig(
+    NvRmDeviceHandle hRm,
+    NvU32 port,
+    NvU32 pinNumber,
+    NvBool Enable)
+{
+    NvRmPmuVddRailCapabilities RailCaps;
+    NvU32 SettlingTime = 0;
+    NvRmGpioIoPowerInfo *pGpioIoPower;
+
+    if (!s_GpioIoPowerInitialized)
+    {
+        NvError err = NvRmGpioIoPowerDiscover(hRm);
+        if (err)
+            return err;
+        s_GpioIoPowerInitialized = NV_TRUE;
+    }
+
+    // Port/pin -> rail routing below is board wiring, not derivable from
+    // the registers; keep the clauses in this order (first match wins).
+    if ((port == GPIO_PORT('s')) ||
+        (port == GPIO_PORT('q')) ||
+        (port == GPIO_PORT('r')))
+    {
+        /* NV_VDD_SYS_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[0];
+    }
+    else if ((port == GPIO_PORT('o')) ||
+             ((port == GPIO_PORT('v')) && (pinNumber < 4)))
+    {
+        /* NV_VDD_BB_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[1];
+    }
+    else if ((port == GPIO_PORT('l')) ||
+             ((port == GPIO_PORT('d')) && (pinNumber > 4)) ||
+             ((port == GPIO_PORT('t')) && (pinNumber < 5)))
+    {
+        /* NV_VDD_VI_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[2];
+    }
+    else if (((port == GPIO_PORT('d')) && (pinNumber < 5)) ||
+             ((port == GPIO_PORT('b')) && (pinNumber > 3)) ||
+             ((port == GPIO_PORT('v')) && ((pinNumber > 3) &&
+                                           (pinNumber < 7))) ||
+             ((port == GPIO_PORT('a')) && ((pinNumber > 5) ||
+                                           (pinNumber == 0))))
+    {
+        /* NV_VDD_SDIO_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[3];
+    }
+    else if ((port == GPIO_PORT('e')) ||
+             (port == GPIO_PORT('f')) ||
+             (port == GPIO_PORT('m')) ||
+             ((port == GPIO_PORT('c')) && ((pinNumber == 1) ||
+                                           (pinNumber == 6))) ||
+             ((port == GPIO_PORT('w')) && (pinNumber < 2)) ||
+             ((port == GPIO_PORT('j')) && ((pinNumber == 1) ||
+                                           (pinNumber == 3) || (pinNumber == 4))) ||
+             ((port == GPIO_PORT('v')) && (pinNumber == 7)) ||
+             ((port == GPIO_PORT('n')) && (pinNumber > 3)) ||
+             ((port == GPIO_PORT('b')) && ((pinNumber == 2) ||
+                                           (pinNumber == 3))))
+    {
+        /* NV_VDD_LCD_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[4];
+    }
+    else
+    {
+        /* NV_VDD_UART_ODM_ID */
+        pGpioIoPower = &s_GpioIoPowerTable[5];
+    }
+
+    if (Enable)
+    {
+        NvRmPmuGetCapabilities(hRm,
+            pGpioIoPower->PmuRailAddress, &RailCaps);
+        NvRmPmuSetVoltage(hRm,
+            pGpioIoPower->PmuRailAddress,
+            RailCaps.requestMilliVolts, &SettlingTime);
+    }
+    else
+    {
+        NvRmPmuSetVoltage(hRm,
+            pGpioIoPower->PmuRailAddress,
+            ODM_VOLTAGE_OFF, &SettlingTime);
+    }
+    if (SettlingTime)
+        NvOsWaitUS(SettlingTime);
+
+    return NvSuccess;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.h b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.h
new file mode 100644
index 000000000000..deb48f2d97f5
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_private.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_GPIO_PRIVATE_H
+#define INCLUDED_NVRM_GPIO_PRIVATE_H
+
+#include "nvrm_gpio.h"
+#include "ap15/argpio.h"
+#include "nvrm_structure.h"
+#include "ap15/ap15rm_private.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_query_discovery.h"
+#include "nvrm_pmu.h"
+
+#define GPIO_INTR_MAX 32
+// Alphabetic port letter -> absolute port index ('a' == 0).
+#define GPIO_PORT(x) ((x) - 'a')
+// Pin-handle layout: byte 0 = pin, byte 1 = port, byte 2 = instance
+// (must match GPIO_MAKE_PIN_HANDLE).
+#define GET_PIN(h) ((((NvU32)(h))) & 0xFF)
+#define GET_PORT(h) ((((NvU32)(h)) >> 8) & 0xFF)
+#define GET_INSTANCE(h) ((((NvU32)(h)) >> 16) & 0xFF)
+
+// Size of a port register.
+#define NV_GPIO_PORT_REG_SIZE (GPIO_CNF_1 - GPIO_CNF_0)
+// INT_LVL bits that have masked-write aliases (per-pin level bits); the
+// remaining EDGE/DELTA bits must go through the software shadow.
+#define GPIO_INT_LVL_UNSHADOWED_MASK \
+            (GPIO_INT_LVL_0_BIT_7_FIELD | GPIO_INT_LVL_0_BIT_6_FIELD | \
+            GPIO_INT_LVL_0_BIT_5_FIELD | GPIO_INT_LVL_0_BIT_4_FIELD |  \
+            GPIO_INT_LVL_0_BIT_3_FIELD | GPIO_INT_LVL_0_BIT_2_FIELD |  \
+            GPIO_INT_LVL_0_BIT_1_FIELD | GPIO_INT_LVL_0_BIT_0_FIELD)
+
+#define GPIO_INT_LVL_SHADOWED_MASK (~GPIO_INT_LVL_UNSHADOWED_MASK)
+
+// Gpio register read/write macros
+
+#define GPIO_PINS_PER_PORT 8
+
+// Atomically writes a single pin's bit of the given register via the
+// MSK_* register alias: the high byte carries the write-enable mask for
+// the pin, the low byte the value.
+#define GPIO_MASKED_WRITE(rm, Instance, Port, Reg, Pin, value) \
+    do \
+    { \
+        NV_REGW((rm), NvRmPrivModuleID_Gpio, (Instance),  ((Port) * NV_GPIO_PORT_REG_SIZE) + GPIO_MSK_CNF_0 + \
+            (GPIO_##Reg##_0), (((1<<((Pin)+ GPIO_PINS_PER_PORT)) | ((value) << (Pin))))); \
+    } while (0)
+
+
+// Gpio register read/write macros
+#define GPIO_REGR( rm, Instance, Port, Reg, ReadData) \
+    do  \
+    {   \
+        ReadData = NV_REGR((rm), NvRmPrivModuleID_Gpio, (Instance), ((Port) * NV_GPIO_PORT_REG_SIZE) + \
+            (GPIO_##Reg##_0)); \
+    } while (0)
+
+#define GPIO_REGW( rm, Instance, Port, Reg, Data2Write ) \
+    do \
+    { \
+        NV_REGW((rm), NvRmPrivModuleID_Gpio, (Instance), ((Port) * NV_GPIO_PORT_REG_SIZE) + \
+            (GPIO_##Reg##_0), (Data2Write)); \
+    } while (0)
+
+/* Bit mask of hardware features present in GPIO controller. */
+typedef enum {
+    NVRM_GPIO_CAP_FEAT_NONE = 0,
+    NVRM_GPIO_CAP_FEAT_EDGE_INTR = 0x000000001
+} NvRmGpioCapFeatures;
+
+
+// Capabilities of one GPIO controller generation, selected by hardware
+// version in the caps table.
+typedef struct NvRmGpioCapsRec {
+    NvU32 Instances;           // number of controller instances
+    NvU32 PortsPerInstances;   // ports per instance
+    NvU32 PinsPerPort;         // pins per port
+    NvU32 Features;            // NvRmGpioCapFeatures bit mask
+} NvRmGpioCaps;
+
+// Association of an ODM power-rail GUID with its PMU rail address
+// (resolved lazily at first use).
+typedef struct NvRmGpioIoPowerInfoRec
+{
+    // SoC Power rail GUID
+    NvU64 PowerRailId;
+
+    // PMU Rail Address
+    NvU32 PmuRailAddress;
+
+} NvRmGpioIoPowerInfo;
+
+/**
+ * GPIO wrapper for NvRmModuleGetCapabilities().
+ *
+ * @param hRm The RM device handle
+ * @param Capability Out parameter: the cap that maches the current hardware
+ */
+NvError
+NvRmGpioGetCapabilities(
+ NvRmDeviceHandle hRm,
+ void **Capability );
+
+NvError NvRmGpioIoPowerConfig(
+ NvRmDeviceHandle hRm,
+ NvU32 port,
+ NvU32 pinNumber,
+ NvBool Enable);
+
+#endif // INCLUDED_NVRM_GPIO_PRIVATE_H
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_stub_helper.c b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_stub_helper.c
new file mode 100644
index 000000000000..c3ab43cb7842
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/nvrm_gpio_stub_helper.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvassert.h"
+#include "nvos.h"
+#include "nvrm_gpio.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_moduleids.h"
+
+// Per-registration state for one GPIO interrupt: the pin, the client
+// callback, and the OS-level interrupt registration behind it.
+struct NvRmGpioInterruptRec
+{
+    NvRmDeviceHandle hRm;             // RM device the IRQ was registered on
+    NvRmGpioHandle hGpio;             // GPIO manager the pin belongs to
+    NvRmGpioPinHandle hPin;           // pin configured for interrupt mode
+    NvRmGpioPinMode Mode;             // one of the InputInterrupt* modes
+    NvOsInterruptHandler Callback;    // client callback invoked from the ISR
+    void *arg;                        // opaque argument passed to Callback
+    NvU32 IrqNumber;                  // logical IRQ resolved from the pin
+    NvOsInterruptHandle NvOsIntHandle; // OS interrupt registration handle
+    NvU32 DebounceTime;               // optional pre-callback delay, in ms
+};
+
+static
+void NvRmPrivGpioIsr(void *arg);
+
+// Registers a client callback for a GPIO pin interrupt: allocates the
+// bookkeeping record, configures the pin into the requested interrupt
+// mode, resolves its IRQ, and registers the private ISR with the RM
+// interrupt layer (initially disabled; see NvRmGpioInterruptEnable).
+// On any failure the pin is returned to Inactive and the record is freed,
+// with *hGpioInterrupt set to 0.
+NvError
+NvRmGpioInterruptRegister(
+    NvRmGpioHandle hGpio,
+    NvRmDeviceHandle hRm,
+    NvRmGpioPinHandle hPin,
+    NvOsInterruptHandler Callback,
+    NvRmGpioPinMode Mode,
+    void *CallbackArg,
+    NvRmGpioInterruptHandle *hGpioInterrupt,
+    NvU32 DebounceTime)
+{
+    /* Get all these from the handle and/or gpio caps API */
+    NvError err;
+    struct NvRmGpioInterruptRec *h = NULL;
+    NvOsInterruptHandler GpioIntHandler = NvRmPrivGpioIsr;
+
+    NV_ASSERT(Mode == NvRmGpioPinMode_InputInterruptLow ||
+        Mode == NvRmGpioPinMode_InputInterruptRisingEdge ||
+        Mode == NvRmGpioPinMode_InputInterruptFallingEdge ||
+        Mode == NvRmGpioPinMode_InputInterruptHigh ||
+        Mode == NvRmGpioPinMode_InputInterruptAny);
+
+    /* Allocate memory for the NvRmGpioInterruptHandle */
+    h = (NvRmGpioInterruptHandle)NvOsAlloc(sizeof(struct NvRmGpioInterruptRec));
+    if (h == NULL)
+    {
+        err = NvError_InsufficientMemory;
+        goto fail;
+    }
+
+    NvOsMemset(h, 0, sizeof(struct NvRmGpioInterruptRec));
+
+    h->hPin = hPin;
+    h->Mode = Mode;
+    h->Callback = Callback;
+    h->hRm = hRm;
+    h->hGpio = hGpio;
+    h->arg = CallbackArg;
+    h->DebounceTime = DebounceTime;
+
+    // Put the pin into interrupt mode before hooking up the ISR.
+    err = NvRmGpioConfigPins(hGpio, &hPin, 1, Mode);
+    if (err != NvSuccess)
+    {
+        goto fail;
+    }
+
+    if (!h->NvOsIntHandle)
+    {
+        NvRmGpioGetIrqs(hRm, &hPin, &(h->IrqNumber), 1);
+
+        // Register disabled (NV_FALSE): the client arms it explicitly via
+        // NvRmGpioInterruptEnable once ready to take callbacks.
+        err = NvRmInterruptRegister(hRm, 1, &h->IrqNumber, &GpioIntHandler,
+                h, &h->NvOsIntHandle, NV_FALSE);
+
+        if (err != NvSuccess)
+        {
+            // Undo the pin configuration before bailing out.
+            NvError e;
+            e = NvRmGpioConfigPins(hGpio, &hPin, 1, NvRmGpioPinMode_Inactive);
+            NV_ASSERT(!e);
+            (void)e;
+            goto fail;
+        }
+    }
+
+    NV_ASSERT(h->NvOsIntHandle);
+
+    *hGpioInterrupt = h;
+    return NvSuccess;
+
+fail:
+    NvOsFree(h);
+    *hGpioInterrupt = 0;
+    return err;
+}
+
+/*
+ * Arms a previously registered GPIO interrupt so its callback can fire.
+ * Returns NvError_BadParameter for a NULL handle (asserts in debug).
+ */
+NvError
+NvRmGpioInterruptEnable(NvRmGpioInterruptHandle hGpioInterrupt)
+{
+    NV_ASSERT(hGpioInterrupt);
+
+    if (hGpioInterrupt == NULL)
+        return NvError_BadParameter;
+
+    return NvRmInterruptEnable(hGpioInterrupt->hRm,
+               hGpioInterrupt->NvOsIntHandle);
+}
+
+/*
+ * Masks (mask == NV_TRUE) or unmasks a registered GPIO interrupt at the
+ * OS interrupt layer.
+ */
+void
+NvRmGpioInterruptMask(NvRmGpioInterruptHandle hGpioInterrupt, NvBool mask)
+{
+    NvOsInterruptMask(hGpioInterrupt->NvOsIntHandle, mask);
+}
+
+// Private ISR trampoline: optionally sleeps for the configured debounce
+// interval, then forwards to the client's callback with its argument.
+static
+void NvRmPrivGpioIsr(void *arg)
+{
+    NvU32 i = 0;
+    NvRmGpioInterruptHandle info = (NvRmGpioInterruptHandle)arg;
+
+    if (info->DebounceTime)
+    {
+        NvOsSleepMS(info->DebounceTime);
+        // NOTE(review): empty counted loop — presumably a tiny extra
+        // settle delay, but it is not volatile so the compiler is free to
+        // remove it; confirm whether it is still needed.
+        for (i = 0; i < 100; i++)
+            ;
+    }
+    /* Call the clients callback function */
+    (*info->Callback)(info->arg);
+
+    return;
+}
+
+/*
+ * Unregisters a GPIO interrupt: returns the pin to Inactive, removes the
+ * OS interrupt registration, and frees the bookkeeping record. Safe to
+ * call with a NULL handle.
+ *
+ * Fix over the original: the NvRmGpioConfigPins call was made inside
+ * NV_ASSERT(...), so in builds that compile assertions out the pin was
+ * never deconfigured. The call is now made unconditionally and only its
+ * result is asserted.
+ */
+void
+NvRmGpioInterruptUnregister(
+    NvRmGpioHandle hGpio,
+    NvRmDeviceHandle hRm,
+    NvRmGpioInterruptHandle handle)
+{
+    NvError err;
+
+    if (handle == NULL)
+        return;
+
+    NV_ASSERT(hGpio);
+    NV_ASSERT(hRm);
+
+    err = NvRmGpioConfigPins(hGpio, &handle->hPin, 1,
+              NvRmGpioPinMode_Inactive);
+    NV_ASSERT(err == NvSuccess);
+    (void)err;    // only checked in debug builds
+
+    NvRmInterruptUnregister(hRm, handle->NvOsIntHandle);
+    handle->NvOsIntHandle = NULL;
+
+    NvOsFree(handle);
+    return;
+}
+
+/*
+ * Signals that the client callback has finished so the interrupt can be
+ * re-armed. Asserts (debug) if called before registration completed.
+ */
+void
+NvRmGpioInterruptDone( NvRmGpioInterruptHandle handle )
+{
+    if (handle->NvOsIntHandle == NULL)
+    {
+        NV_ASSERT(!"Make sure that interrupt source is enabled AFTER the interrupt is succesfully registered.");
+    }
+    NvRmInterruptDone(handle->NvOsIntHandle);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_common_slink_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/rm_common_slink_hw_private.c
new file mode 100644
index 000000000000..3065c312323c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_common_slink_hw_private.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Private functions implementation for the slink Rm driver</b>
+ *
+ * @b Description: Implements the private functions for the slink hw interface.
+ *
+ */
+
+// hardware includes
+#include "ap15/arslink.h"
+#include "rm_spi_slink_hw_private.h"
+#include "nvrm_drf.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+#define SLINK_REG_READ32(pSlinkHwRegsVirtBaseAdd, reg) \
+ NV_READ32((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))
+#define SLINK_REG_WRITE32(pSlinkHwRegsVirtBaseAdd, reg, val) \
+ do { \
+ NV_WRITE32((((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))), (val)); \
+ } while(0)
+
+
+#define MAX_SLINK_FIFO_DEPTH 32
+
+#define ALL_SLINK_STATUS_CLEAR \
+ (NV_DRF_NUM(SLINK, STATUS, RDY, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_OVF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_OVF, 1))
+
+#define RX_ERROR_STATUS (NV_DRF_NUM(SLINK, STATUS, RX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_OVF, 1))
+#define TX_ERROR_STATUS (NV_DRF_NUM(SLINK, STATUS, TX_OVF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_UNF, 1))
+
+/**
+ * Bring the controller in sync with the software shadow copies by writing
+ * the shadowed COMMAND2 and COMMAND register values to the hardware.
+ */
+static void SlinkHwControllerInitialize(SerialHwRegisters *pSlinkHwRegs)
+{
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND2,
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command2);
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND,
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command1);
+}
+
+
+/**
+ * Set the functional mode: master or slave.
+ *
+ * Updates the shadow Command1 value, writes it to the COMMAND register, and
+ * records the selected mode in pSlinkHwRegs->IsMasterMode.
+ */
+static void
+SlinkHwSetFunctionalMode(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvBool IsMasterMode)
+{
+ NvU32 CommandReg = pSlinkHwRegs->HwRegs.SlinkRegs.Command1;
+ if (IsMasterMode)
+ CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, M_S, MASTER, CommandReg);
+ else
+ CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, M_S, SLAVE, CommandReg);
+
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg;
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND, CommandReg);
+ pSlinkHwRegs->IsMasterMode = IsMasterMode;
+}
+
+/**
+ * Flush the requested receive and/or transmit FIFOs by setting the FLUSH
+ * bits in the STATUS register, then poll until hardware clears them.
+ *
+ * NOTE(review): the poll loop below has no timeout -- a wedged controller
+ * would hang here; confirm this is acceptable in all call contexts.
+ */
+static void
+SlinkHwResetFifo(
+ SerialHwRegisters *pSlinkHwRegs,
+ SerialHwFifo FifoType)
+{
+ NvU32 ResetBits = 0;
+ NvU32 StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+ if (FifoType & SerialHwFifo_Rx)
+ ResetBits = NV_DRF_NUM(SLINK, STATUS, RX_FLUSH, 1);
+ if (FifoType & SerialHwFifo_Tx)
+ ResetBits |= NV_DRF_NUM(SLINK, STATUS, TX_FLUSH, 1);
+
+ StatusReg |= ResetBits;
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, STATUS, StatusReg);
+
+ // Now wait till the flush bits become 0
+ do
+ {
+ StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+ } while (StatusReg & ResetBits);
+}
+
+/**
+ * Find out whether the transmit FIFO is full or not.
+ *
+ * @return NV_TRUE if the STATUS register reports TX_FULL, else NV_FALSE.
+ */
+static NvBool SlinkHwIsTransmitFifoFull(SerialHwRegisters *pSpiHwRegs)
+{
+ NvU32 StatusReg = SLINK_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+ if (StatusReg & NV_DRF_DEF(SLINK, STATUS, TX_FULL, FULL))
+ return NV_TRUE;
+ return NV_FALSE;
+}
+
+
+/**
+ * Set the transfer bit order: LSB-first or MSB-first.
+ *
+ * Only the shadow Command2 value is updated here; no register write is
+ * issued -- the value reaches hardware when Command2 is next programmed
+ * (e.g. via SlinkHwControllerInitialize or SlinkHwSetDataFlow).
+ */
+static void
+SlinkHwSetTransferBitOrder(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvBool IsLsbFirst)
+{
+ NvU32 Command2Reg = pSlinkHwRegs->HwRegs.SlinkRegs.Command2;
+ if (IsLsbFirst)
+ Command2Reg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, LSBFE, LAST, Command2Reg);
+ else
+ Command2Reg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, LSBFE, FIRST, Command2Reg);
+
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = Command2Reg;
+}
+
+/**
+ * Start the transfer by enabling DMA mode (and packed mode when selected)
+ * in the DMA_CTL register.
+ *
+ * @param IsReconfigure Unused in this implementation.
+ */
+static void SlinkHwStartTransfer(SerialHwRegisters *pSlinkHwRegs, NvBool IsReconfigure)
+{
+ NvU32 DmaControlReg = pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl;
+
+ // Program the packed mode
+ if (pSlinkHwRegs->IsPackedMode)
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACKED, ENABLE,
+ DmaControlReg);
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+
+ // Hw bug: Need to give some delay after setting the packed mode.
+ NvOsWaitUS(1);
+ }
+
+ // Enable the dma bit in the register variable only
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, DMA_EN, ENABLE, DmaControlReg);
+
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+}
+
+/**
+ * Enable/disable the rx and/or tx data transfer flow.
+ *
+ * Updates RXEN/TXEN in the shadow Command2 value and writes the result to
+ * the COMMAND2 register.
+ */
+static void
+SlinkHwSetDataFlow(
+ SerialHwRegisters *pSlinkHwRegs,
+ SerialHwDataFlow DataFlow,
+ NvBool IsEnable)
+{
+ NvU32 CommandReg2 = pSlinkHwRegs->HwRegs.SlinkRegs.Command2;
+ if (DataFlow & SerialHwDataFlow_Rx)
+ {
+ if (IsEnable)
+ CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, RXEN,
+ ENABLE, CommandReg2);
+ else
+ CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, RXEN,
+ DISABLE, CommandReg2);
+ }
+
+ if (DataFlow & SerialHwDataFlow_Tx)
+ {
+ if (IsEnable)
+ CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, TXEN,
+ ENABLE, CommandReg2);
+ else
+ CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, TXEN,
+ DISABLE, CommandReg2);
+ }
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = CommandReg2;
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND2,
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command2);
+}
+
+
+/**
+ * Set the packet length (in bits) and packed mode.
+ *
+ * Programs BIT_LENGTH as PacketLength-1, clears PACKED first (it is
+ * re-enabled later by SlinkHwStartTransfer), and selects the PACK_SIZE
+ * matching the packet length.  NOTE(review): in packed mode, lengths other
+ * than 4/8/16/32 leave PACK_SIZE unmodified -- presumably callers only
+ * request these sizes; confirm against the caller.
+ */
+static void
+SlinkHwSetPacketLength(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvU32 PacketLength,
+ NvBool IsPackedMode)
+{
+ NvU32 CommandReg1 = pSlinkHwRegs->HwRegs.SlinkRegs.Command1;
+ NvU32 DmaControlReg = pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl;
+
+ CommandReg1 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND, BIT_LENGTH,
+ (PacketLength -1), CommandReg1);
+
+ // Unset the packed bit if it is there
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACKED, DISABLE, DmaControlReg);
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+
+ if (IsPackedMode)
+ {
+ if (PacketLength == 4)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACK_SIZE, PACK4,
+ DmaControlReg);
+ else if (PacketLength == 8)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACK_SIZE, PACK8,
+ DmaControlReg);
+ else if (PacketLength == 16)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACK_SIZE, PACK16,
+ DmaControlReg);
+ else if (PacketLength == 32)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACK_SIZE, PACK32,
+ DmaControlReg);
+ }
+ else
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, PACK_SIZE, PACK4,
+ DmaControlReg);
+ }
+
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND, CommandReg1);
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+
+ pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg1;
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl = DmaControlReg;
+
+ pSlinkHwRegs->PacketLength = PacketLength;
+ pSlinkHwRegs->IsPackedMode = IsPackedMode;
+}
+
+/**
+ * Set the DMA transfer size.
+ *
+ * Programs DMA_BLOCK_SIZE as DmaBlockSize-1 (hardware counts from zero)
+ * in the shadow copy and writes it to the DMA_CTL register.
+ */
+static void
+SlinkHwSetDmaTransferSize(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvU32 DmaBlockSize)
+{
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl =
+ NV_FLD_SET_DRF_NUM(SLINK, DMA_CTL, DMA_BLOCK_SIZE, (DmaBlockSize-1),
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl);
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL,
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl);
+}
+
+/**
+ * Read the current block count (BLK_CNT) from the STATUS register.
+ * (Name spelling matches the HwGetTransferdCountFxn interface slot.)
+ */
+static NvU32 SlinkHwGetTransferdCount(SerialHwRegisters *pSlinkHwRegs)
+{
+ NvU32 DmaBlockSize;
+ NvU32 StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+ DmaBlockSize = NV_DRF_VAL(SLINK, STATUS, BLK_CNT, StatusReg);
+ return (DmaBlockSize);
+}
+
+/**
+ * Set the rx/tx FIFO trigger level.
+ *
+ * @param TriggerLevel Must be 4, 16, 32 or 64 bytes (mapping to the TRIG1/
+ *        TRIG4/TRIG8/TRIG16 word settings); any other value asserts, and in
+ *        release builds falls through writing the register unmodified.
+ */
+static void
+SlinkHwSetTriggerLevel(
+ SerialHwRegisters *pSlinkHwRegs,
+ SerialHwFifo FifoType,
+ NvU32 TriggerLevel)
+{
+ NvU32 DmaControlReg = pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl;
+ switch(TriggerLevel)
+ {
+ case 4:
+ if (FifoType & SerialHwFifo_Rx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, RX_TRIG, TRIG1,
+ DmaControlReg);
+ if (FifoType & SerialHwFifo_Tx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, TX_TRIG, TRIG1,
+ DmaControlReg);
+ break;
+
+ case 16:
+ if (FifoType & SerialHwFifo_Rx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, RX_TRIG, TRIG4,
+ DmaControlReg);
+ if (FifoType & SerialHwFifo_Tx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, TX_TRIG, TRIG4,
+ DmaControlReg);
+ break;
+
+
+ case 32:
+ if (FifoType & SerialHwFifo_Rx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, RX_TRIG, TRIG8,
+ DmaControlReg);
+ if (FifoType & SerialHwFifo_Tx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, TX_TRIG, TRIG8,
+ DmaControlReg);
+ break;
+
+ case 64:
+ if (FifoType & SerialHwFifo_Rx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, RX_TRIG, TRIG16,
+ DmaControlReg);
+ if (FifoType & SerialHwFifo_Tx)
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, TX_TRIG, TRIG16,
+ DmaControlReg);
+ break;
+
+ default:
+ NV_ASSERT(!"Invalid Triggerlevel");
+ }
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl = DmaControlReg;
+}
+
+/**
+ * Enable/disable the rx/tx completion interrupt sources (IE_RXC/IE_TXC)
+ * in the DMA_CTL register.
+ *
+ * The entire body is compiled out when NV_OAL is set (no interrupt support
+ * in the OAL build).
+ */
+static void
+SlinkHwSetInterruptSource(
+ SerialHwRegisters *pSlinkHwRegs,
+ SerialHwDataFlow DataDirection,
+ NvBool IsEnable)
+{
+#if !NV_OAL
+ NvU32 DmaControlReg = pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl;
+ if (DataDirection & SerialHwDataFlow_Rx)
+ {
+ if (IsEnable)
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, IE_RXC,
+ ENABLE, DmaControlReg);
+ }
+ else
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, IE_RXC,
+ DISABLE, DmaControlReg);
+ }
+ }
+
+ if (DataDirection & SerialHwDataFlow_Tx)
+ {
+ if (IsEnable)
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, IE_TXC,
+ ENABLE, DmaControlReg);
+ }
+ else
+ {
+ DmaControlReg = NV_FLD_SET_DRF_DEF(SLINK, DMA_CTL, IE_TXC,
+ DISABLE, DmaControlReg);
+ }
+ }
+
+ pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl = DmaControlReg;
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+#endif
+}
+
+/**
+ * Get the transfer status for the requested directions.
+ *
+ * Snapshots the STATUS register into the shadow copy and checks the
+ * overflow/underflow error bits for the selected directions.
+ *
+ * @return NvError_SpiReceiveError / NvError_SpiTransmitError on an rx/tx
+ *         FIFO error; NvSuccess otherwise.
+ */
+static NvError SlinkHwGetTransferStatus(SerialHwRegisters *pSlinkHwRegs,
+ SerialHwDataFlow DataFlow)
+{
+ NvU32 StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+ pSlinkHwRegs->HwRegs.SlinkRegs.Status = StatusReg;
+ // Check for the receive error
+ if (DataFlow & SerialHwDataFlow_Rx)
+ {
+ if (StatusReg & RX_ERROR_STATUS)
+ return NvError_SpiReceiveError;
+ }
+
+ // Check for the transmit error
+ if (DataFlow & SerialHwDataFlow_Tx)
+ {
+ if (StatusReg & TX_ERROR_STATUS)
+ return NvError_SpiTransmitError;
+ }
+ return NvSuccess;
+}
+
+/**
+ * Clear the transfer status bits (write-1-to-clear) for the requested
+ * directions: always clears RDY, plus the rx/tx error bits as selected.
+ * Other write-1-clear bits are masked off first so they are not cleared
+ * accidentally.
+ */
+static void SlinkHwClearTransferStatus(SerialHwRegisters *pSlinkHwRegs,
+ SerialHwDataFlow DataFlow)
+{
+ NvU32 StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+
+ // Clear all the write 1 on clear status.
+ StatusReg &= (~ALL_SLINK_STATUS_CLEAR);
+
+ // Make ready clear to 1.
+ StatusReg = NV_FLD_SET_DRF_NUM(SLINK, STATUS, RDY, 1, StatusReg);
+
+ // Check for the receive error
+ if (DataFlow & SerialHwDataFlow_Rx)
+ StatusReg |= RX_ERROR_STATUS;
+
+ // Check for the transmit error
+ if (DataFlow & SerialHwDataFlow_Tx)
+ StatusReg |= TX_ERROR_STATUS;
+
+ // Write on slink status register.
+ SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, STATUS, StatusReg);
+}
+
+/**
+ * Check whether the transfer is completed or not.
+ *
+ * @return NV_FALSE while the STATUS register reports BSY, NV_TRUE otherwise.
+ */
+static NvBool SlinkHwIsTransferCompleted( SerialHwRegisters *pSlinkHwRegs)
+{
+ NvU32 StatusReg = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS);
+ if (StatusReg & NV_DRF_DEF(SLINK, STATUS, BSY, BUSY))
+ return NV_FALSE;
+
+ return NV_TRUE;
+}
+
+/**
+ * Initialize the slink interface for hw access: populate the HwInterface
+ * dispatch table with the SLINK implementations defined in this file.
+ */
+void NvRmPrivSpiSlinkInitSlinkInterface(HwInterface *pSlinkInterface)
+{
+ pSlinkInterface->HwControllerInitializeFxn = SlinkHwControllerInitialize;
+ pSlinkInterface->HwSetFunctionalModeFxn = SlinkHwSetFunctionalMode;
+ pSlinkInterface->HwResetFifoFxn = SlinkHwResetFifo;
+ pSlinkInterface->HwIsTransmitFifoFull = SlinkHwIsTransmitFifoFull;
+ pSlinkInterface->HwSetTransferBitOrderFxn = SlinkHwSetTransferBitOrder;
+ pSlinkInterface->HwStartTransferFxn = SlinkHwStartTransfer;
+ pSlinkInterface->HwSetDataFlowFxn = SlinkHwSetDataFlow;
+ pSlinkInterface->HwSetPacketLengthFxn = SlinkHwSetPacketLength;
+ pSlinkInterface->HwSetDmaTransferSizeFxn = SlinkHwSetDmaTransferSize;
+ pSlinkInterface->HwGetTransferdCountFxn = SlinkHwGetTransferdCount;
+ pSlinkInterface->HwSetTriggerLevelFxn = SlinkHwSetTriggerLevel;
+ pSlinkInterface->HwSetInterruptSourceFxn = SlinkHwSetInterruptSource;
+ pSlinkInterface->HwGetTransferStatusFxn = SlinkHwGetTransferStatus;
+ pSlinkInterface->HwClearTransferStatusFxn = SlinkHwClearTransferStatus;
+ pSlinkInterface->HwIsTransferCompletedFxn = SlinkHwIsTransferCompleted;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.c
new file mode 100644
index 000000000000..9cf0eacd715e
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * DMA Resource manager private API for Hw access </b>
+ *
+ * @b Description: Implements the private interface of the nvrm dma to access
+ * the hw apb dma register.
+ *
+ * This files implements the API for accessing the register of the Dma
+ * controller and configure the dma transfers.
+ */
+
+#include "nvrm_dma.h"
+#include "nvrm_drf.h"
+#include "nvrm_hardware_access.h"
+#include "rm_dma_hw_private.h"
+#include "ap15/arapbdma.h"
+#include "ap15/arapbdmachan.h"
+#include "nvrm_drf.h"
+#include "nvassert.h"
+
+#define APBDMACHAN_READ32(pVirtBaseAdd, reg) \
+ NV_READ32((pVirtBaseAdd) + ((APBDMACHAN_CHANNEL_0_##reg##_0)/4))
+#define APBDMACHAN_WRITE32(pVirtBaseAdd, reg, val) \
+ do { \
+ NV_WRITE32(((pVirtBaseAdd) + ((APBDMACHAN_CHANNEL_0_##reg##_0)/4)), (val)); \
+ } while(0)
+
+
+/**
+ * Globally enable/disable the apb dma controller via the GEN bit of the
+ * APBDMA COMMAND register.
+ *
+ * @param pGenVirtBaseAddress Virtual base of the global APB DMA registers.
+ * @param IsEnable NV_TRUE to enable the controller, NV_FALSE to disable.
+ */
+static void GlobalSetApbDma(NvU32 *pGenVirtBaseAddress, NvBool IsEnable)
+{
+ NvU32 CommandRegs = 0;
+
+ // Read the apb dma command register.
+ CommandRegs = NV_READ32((pGenVirtBaseAddress + (APBDMA_COMMAND_0/4)));
+
+ // Enable/disable the global enable bit of this register.
+ if(IsEnable)
+ CommandRegs = NV_FLD_SET_DRF_DEF(APBDMA, COMMAND, GEN, ENABLE, CommandRegs);
+ else
+ CommandRegs = NV_FLD_SET_DRF_DEF(APBDMA, COMMAND, GEN, DISABLE, CommandRegs);
+
+ // Write into the register.
+ NV_WRITE32( (pGenVirtBaseAddress + ( APBDMA_COMMAND_0/4)),CommandRegs);
+}
+
+
+/**
+ * Configure the shadow address registers for a transfer: the peripheral
+ * side address goes to ApbAddressReg and the memory side to AhbAddressReg,
+ * chosen by which end of the transfer is the peripheral.
+ */
+static void
+ConfigureApbDmaAddress(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmPhysAddr SourceAdd,
+ NvRmPhysAddr DestAdd,
+ NvBool IsSourceAddPeripheralType)
+{
+ pDmaChRegs->ApbAddressReg = (IsSourceAddPeripheralType)? SourceAdd: DestAdd;
+ pDmaChRegs->AhbAddressReg = (IsSourceAddPeripheralType)? DestAdd: SourceAdd;
+}
+
+
+/**
+ * Set the data transfer size (in bytes) for the apb dma.
+ *
+ * Converts bytes to words (>>2); in double-buffer mode each half carries
+ * half the request, hence >>3.  WCOUNT is programmed as words-1.
+ */
+static void
+SetApbDmaTransferSize(
+ DmaChanRegisters *pDmaChRegs,
+ NvU32 TransferSize,
+ NvBool IsDoubleBuffMode)
+{
+ // If double buff mode the programmed word count will be half of the data
+ // request.
+ NvU32 WordCount = (IsDoubleBuffMode)? (TransferSize >> 3): (TransferSize >> 2);
+
+ // Configure the word count in the control register.
+ pDmaChRegs->ControlReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
+ (WordCount-1), pDmaChRegs->ControlReg);
+}
+
+
+/**
+ * Accumulate one completed programmed transfer (in bytes) into
+ * pDmaChRegs->TransferedCount, capped at two full programmed transfers.
+ */
+static void AddApbDmaTransferredCount(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 ProgrammedWordCount;
+
+ // Get the programmed transfer count.
+ ProgrammedWordCount = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
+ pDmaChRegs->ControlReg);
+ ProgrammedWordCount = (ProgrammedWordCount +1) << 2;
+ pDmaChRegs->TransferedCount += ProgrammedWordCount;
+
+ // Limiting the transfer count to not be more than 2 times
+ if (pDmaChRegs->TransferedCount > (ProgrammedWordCount << 1))
+ pDmaChRegs->TransferedCount = ProgrammedWordCount << 1;
+}
+
+/**
+ * Acknowledge and clear a pending end-of-conversion (ISE_EOC) interrupt on
+ * the channel; the STA register is write-1-to-clear.
+ */
+static void AckNClearApbDmaInterrupt(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaStatusReg;
+ // Get the status of the dma channel.
+ DmaStatusReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, STA);
+
+ // Write 1 on clear
+ if (DmaStatusReg & NV_DRF_DEF(APBDMACHAN_CHANNEL_0, STA, ISE_EOC, INTR))
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, STA, DmaStatusReg);
+}
+/**
+ * Check whether the dma transfer is completed or not for the given channel.
+ *
+ * Side effect: when the ISE_EOC bit is set it is cleared here (write-1-clear),
+ * so a second call for the same completion returns NV_FALSE.
+ */
+static NvBool IsApbDmaTransferCompleted(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaStatusReg;
+
+ // Get the status of the dma channel.
+ DmaStatusReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, STA);
+ if (DmaStatusReg & NV_DRF_DEF(APBDMACHAN_CHANNEL_0, STA, ISE_EOC, INTR))
+ {
+ // Write the status to clear it
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, STA, DmaStatusReg);
+ return NV_TRUE;
+ }
+ else
+ return NV_FALSE;
+}
+
+/**
+ * Get the transferred count (in bytes) for apb dma, including any previously
+ * accumulated completed transfers, and reset the accumulator.
+ *
+ * NOTE(review): the STA snapshot used for the BSY/COUNT decisions below is
+ * taken *before* IsApbDmaTransferCompleted re-reads and clears the status,
+ * so the decisions are based on the earlier snapshot -- confirm intended.
+ */
+static NvU32 GetApbDmaTransferredCount(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaStatusReg;
+ NvU32 ProgrammedWordCount;
+ NvU32 RemainingWordCount;
+ NvU32 TransferedSize;
+ NvU32 RetTransferSize;
+
+ // Get the status of the dma channel.
+ DmaStatusReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, STA);
+ ProgrammedWordCount = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
+ pDmaChRegs->ControlReg);
+ RemainingWordCount = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, STA, COUNT, DmaStatusReg);
+ if (IsApbDmaTransferCompleted(pDmaChRegs))
+ AddApbDmaTransferredCount(pDmaChRegs);
+
+ if (DmaStatusReg & NV_DRF_DEF(APBDMACHAN_CHANNEL_0, STA, BSY, ACTIVE))
+ {
+ if (RemainingWordCount)
+ TransferedSize = (ProgrammedWordCount - RemainingWordCount);
+ else
+ TransferedSize = (ProgrammedWordCount);
+ }
+ else
+ {
+ TransferedSize = (ProgrammedWordCount +1 );
+ }
+ RetTransferSize = (TransferedSize << 2) + pDmaChRegs->TransferedCount;
+ pDmaChRegs->TransferedCount = 0;
+ return (RetTransferSize);
+}
+
+
+/**
+ * Get the transferred count (in bytes) for apb dma, optionally stopping
+ * further requests first, and reset the accumulator.
+ *
+ * Stopping is done by retargeting the channel's request selector to the
+ * unused NA31 source, so no new peripheral requests are serviced while the
+ * counts are read.
+ */
+static NvU32 GetApbDmaTransferredCountWithStop(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsTransferStop)
+{
+ NvU32 DmaStatusReg;
+ NvU32 FlowCtrlReg;
+ NvU32 ProgrammedWordCount;
+ NvU32 RemainingWordCount;
+ NvU32 TransferedSize;
+ NvU32 RetTransferSize;
+
+ if (IsApbDmaTransferCompleted(pDmaChRegs))
+ AddApbDmaTransferredCount(pDmaChRegs);
+
+ if (IsTransferStop)
+ {
+ FlowCtrlReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, CSR);
+ FlowCtrlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ CSR, REQ_SEL, NA31,
+ FlowCtrlReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, FlowCtrlReg);
+ }
+
+ // Get the status of the dma channel.
+ DmaStatusReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, STA);
+ ProgrammedWordCount = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, CSR, WCOUNT,
+ pDmaChRegs->ControlReg);
+ RemainingWordCount = NV_DRF_VAL(APBDMACHAN_CHANNEL_0, STA, COUNT, DmaStatusReg);
+ if (DmaStatusReg & NV_DRF_DEF(APBDMACHAN_CHANNEL_0, STA, BSY, ACTIVE))
+ {
+ if (RemainingWordCount)
+ TransferedSize = (ProgrammedWordCount - RemainingWordCount);
+ else
+ TransferedSize = (ProgrammedWordCount);
+ }
+ else
+ {
+ TransferedSize = (ProgrammedWordCount+1);
+ }
+ RetTransferSize = (TransferedSize << 2) + pDmaChRegs->TransferedCount;
+ pDmaChRegs->TransferedCount = 0;
+ return (RetTransferSize);
+}
+
+/**
+ * Set the AHB burst size in the shadow AHB_SEQ register, chosen per
+ * requesting module: 1 word for byte-oriented devices (uart/i2c/dvc),
+ * 4 words for audio (i2s/spdif), and for spi/slink/vfir/mipi the largest
+ * burst (1/4/8 words) that evenly divides TransferSize.
+ */
+static void
+SetDmaBurstSize(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmDmaModuleID DmaReqModuleId,
+ NvU32 TransferSize)
+{
+ // Check for the dma requestor Id and based on the requestor and module Id
+ // Select the burst size.
+ switch (DmaReqModuleId)
+ {
+ case NvRmDmaModuleID_Uart:
+ case NvRmDmaModuleID_I2c:
+ case NvRmDmaModuleID_Dvc:
+
+ // Dma requestor is the uart/I2c/DvcI2c.
+ // Set the dma burst size to 1 words.
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ AHB_SEQ, AHB_BURST, DMA_BURST_1WORDS,
+ pDmaChRegs->AhbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_I2s:
+ case NvRmDmaModuleID_Spdif:
+ // Dma requestor is the i2s.
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ AHB_SEQ, AHB_BURST, DMA_BURST_4WORDS,
+ pDmaChRegs->AhbSequenceReg);
+ break;
+
+ case NvRmDmaModuleID_Slink:
+ case NvRmDmaModuleID_Spi:
+ // Dma requestor is the spi/slink.
+ if ((TransferSize & 0xF) == 0)
+ {
+ // Multiple of 4 words
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ AHB_SEQ, AHB_BURST, DMA_BURST_4WORDS,
+ pDmaChRegs->AhbSequenceReg);
+ }
+ else
+ {
+ // Non multiple of 4 words
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ AHB_SEQ, AHB_BURST, DMA_BURST_1WORDS,
+ pDmaChRegs->AhbSequenceReg);
+ }
+ break;
+
+ case NvRmDmaModuleID_Vfir:
+ case NvRmDmaModuleID_Mipi:
+ if ((TransferSize & 0x1F))
+ {
+ // Non multiple of 8 words
+ if (TransferSize & 0xF)
+ {
+ // Non multiple of 4 words
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(
+ APBDMACHAN_CHANNEL_0, AHB_SEQ, AHB_BURST,
+ DMA_BURST_1WORDS, pDmaChRegs->AhbSequenceReg);
+ }
+ else
+ {
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(
+ APBDMACHAN_CHANNEL_0, AHB_SEQ, AHB_BURST,
+ DMA_BURST_4WORDS, pDmaChRegs->AhbSequenceReg);
+ }
+ }
+ else
+ {
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_DEF(
+ APBDMACHAN_CHANNEL_0, AHB_SEQ, AHB_BURST,
+ DMA_BURST_8WORDS, pDmaChRegs->AhbSequenceReg);
+ }
+ break;
+
+ default:
+ NV_ASSERT(!"Invalid module");
+ }
+}
+
+
+/**
+ * Enable byte swapping on the destination side of the transfer: on the
+ * peripheral (APB) side via APB_DATA_SWAP, or on the memory (AHB) side via
+ * AHB_DATA_SWAP.  Only the shadow sequence registers are updated here.
+ */
+static void
+EnableApbDmaDestBitSwap(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsDestAddPeripheralType)
+{
+ // Source to destination address.
+ if (IsDestAddPeripheralType)
+ {
+ // Enable the bit swap to the Peripheral address.
+ pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0,
+ APB_SEQ, APB_DATA_SWAP, ENABLE,
+ pDmaChRegs->ApbSequenceReg);
+ }
+ else
+ {
+ // Enable the bit swap to the memory address.
+ pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
+ AHB_SEQ, AHB_DATA_SWAP, 1,
+ pDmaChRegs->AhbSequenceReg);
+ }
+}
+
+/**
+ * Set the address wrapping information for apb dma.
+ * The different address wrapping is supported by APB dma.
+ *
+ * Translates the requested wrap sizes (bytes) into the hardware table
+ * indices for the APB_SEQ/AHB_SEQ shadow registers; asserts if a size is
+ * not in the supported tables.  TransferSize is accepted for interface
+ * compatibility but unused here.
+ *
+ * Fix: the lookup tables are now `static const` instead of being rebuilt
+ * on the stack on every call.
+ */
+static void
+SetApbDmaAddressWrapping(
+    DmaChanRegisters *pDmaChRegs,
+    NvRmPhysAddr SourceAddWrap,
+    NvRmPhysAddr DestAddWrap,
+    NvU32 TransferSize,
+    NvBool IsSourceAddPeripheralType)
+{
+    // Supported address wrap sizes, in words (4 bytes): index into these
+    // tables is the value programmed into the wrap fields.
+    static const NvU32 SupportedAhbSideAddWrapSize[8] =
+        {0, 32, 64, 128, 256, 512, 1024, 2048};
+    static const NvU32 SupportedApbSideAddWrapSize[8] =
+        {0, 1, 2, 4, 8, 16, 32, 64};
+    const int MaxSupportedTable = 8;
+
+    NvU32 ApbWrapSizeInWords;
+    NvU32 AhbWrapSizeInWords;
+    int ApbWrapIndex;
+    int AhbWrapIndex;
+
+    // Route the wrap sizes to the peripheral (APB) and memory (AHB) sides
+    // as per the transfer direction, then convert bytes to words.
+    ApbWrapSizeInWords = (IsSourceAddPeripheralType)? SourceAddWrap: DestAddWrap;
+    AhbWrapSizeInWords = (IsSourceAddPeripheralType)? DestAddWrap : SourceAddWrap;
+
+    ApbWrapSizeInWords = ApbWrapSizeInWords >> 2;
+    AhbWrapSizeInWords = AhbWrapSizeInWords >> 2;
+
+    // Check for the supported address wrap for APB Side
+    for (ApbWrapIndex = 0; ApbWrapIndex < MaxSupportedTable; ++ApbWrapIndex)
+    {
+        if (ApbWrapSizeInWords == SupportedApbSideAddWrapSize[ApbWrapIndex])
+            break;
+    }
+    NV_ASSERT(ApbWrapIndex < MaxSupportedTable);
+
+    // Check for the supported address wrap for AHB Side
+    for (AhbWrapIndex = 0; AhbWrapIndex < MaxSupportedTable; ++AhbWrapIndex)
+    {
+        if (AhbWrapSizeInWords == SupportedAhbSideAddWrapSize[AhbWrapIndex])
+            break;
+    }
+    NV_ASSERT(AhbWrapIndex < MaxSupportedTable);
+
+    // Program the wrap indices into the shadow sequence registers.
+    pDmaChRegs->ApbSequenceReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
+                                    APB_SEQ, APB_ADDR_WRAP, ApbWrapIndex,
+                                    pDmaChRegs->ApbSequenceReg);
+
+    pDmaChRegs->AhbSequenceReg = NV_FLD_SET_DRF_NUM(APBDMACHAN_CHANNEL_0,
+                                    AHB_SEQ, WRAP, AhbWrapIndex,
+                                    pDmaChRegs->AhbSequenceReg);
+}
+
+/**
+ * Start the apb dma transfer from the current request. This will read the
+ * dma register information from the dma configuration register and program the
+ * dma register and start the transfer.
+ *
+ * Resets the running byte accumulator, writes all configured shadow values
+ * to the channel registers, then sets ENB in CSR to kick off the transfer.
+ */
+static void StartApbDmaTransfer(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaStartCommand;
+
+ pDmaChRegs->TransferedCount = 0;
+
+ // Write configured data into the hw register of dma.
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, pDmaChRegs->ControlReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, AHB_SEQ, pDmaChRegs->AhbSequenceReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, AHB_PTR, pDmaChRegs->AhbAddressReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, APB_SEQ, pDmaChRegs->ApbSequenceReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, APB_PTR, pDmaChRegs->ApbAddressReg);
+
+ // Start the dma transfer.
+ DmaStartCommand = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB, ENABLE,
+ pDmaChRegs->ControlReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, DmaStartCommand);
+}
+
+/**
+ * Continue the apb dma transfer, special for the continuous mode.
+ *
+ * Re-writes the AHB (memory) pointer, and re-writes CSR with ENB set only
+ * if the current hardware CSR differs from the desired value, avoiding a
+ * redundant control-register write in steady state.
+ */
+static void ContinueApbDmaTransfer(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaStartCommand;
+ NvU32 CurrControlReg;
+ NvU32 NewControlReg;
+
+ NewControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB, ENABLE,
+ pDmaChRegs->ControlReg);
+ CurrControlReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, CSR);
+
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, AHB_PTR, pDmaChRegs->AhbAddressReg);
+
+ // Write the control register only when there is difference between the
+ // current setting and new setting.
+ if (NewControlReg != CurrControlReg)
+ {
+ // Start the dma transfer.
+ DmaStartCommand = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB,
+ ENABLE, pDmaChRegs->ControlReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, DmaStartCommand);
+ }
+}
+
+/**
+ * Start the Apb dma transfer from the current request. This will read the
+ * current configured address from the register and increment them as per
+ * passed parameter and start the dma transfer.
+ *
+ * NOTE(review): PeriAddIncSize is unused -- only the AHB (memory) pointer
+ * is advanced here; the APB pointer is left as programmed.  Confirm against
+ * callers.  Also, the final ENB set on NewControlReg below is redundant
+ * (already applied at the top) but harmless.
+ */
+static void
+StartApbDmaWithAddInc(
+ DmaChanRegisters *pDmaChRegs,
+ NvU32 PeriAddIncSize,
+ NvU32 MemoryAddIncSize,
+ NvU32 IsContMode)
+{
+ NvU32 NewControlReg;
+ NvU32 CurrControlReg;
+ NvU32 AhbAddress;
+
+ NewControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB, ENABLE,
+ pDmaChRegs->ControlReg);
+
+ // Read the addresses programmed in the dma hw registers.
+ AhbAddress = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, AHB_PTR);
+
+ // Increment the address and write back the new address.
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, AHB_PTR,
+ (AhbAddress + MemoryAddIncSize));
+
+ // If it is continuous mode and the old control setting already matches
+ // the new one, the control register need not be rewritten.
+ if (IsContMode)
+ {
+ CurrControlReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, CSR);
+ if (CurrControlReg == NewControlReg)
+ return;
+ }
+ // Start the dma transfer.
+ NewControlReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB, ENABLE,
+ NewControlReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, NewControlReg);
+}
+
+
+
+/**
+ * Stop the data transfer in the given APB/AHB dma channel number.
+ *
+ * Ordering matters: first disable the end-of-conversion interrupt, then
+ * disable the channel enable bit, then acknowledge/clear any interrupt
+ * that may already be pending.
+ */
+static void StopApbDmaTransfer(DmaChanRegisters *pDmaChRegs)
+{
+ NvU32 DmaCommandReg;
+
+ // Stop the dma transfer.
+ // First disable the interrupt and then disable the dma enable bit.
+ DmaCommandReg = APBDMACHAN_READ32(pDmaChRegs->pHwDmaChanReg, CSR);
+ DmaCommandReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, IE_EOC, DISABLE,
+ DmaCommandReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, DmaCommandReg);
+
+ DmaCommandReg = NV_FLD_SET_DRF_DEF(APBDMACHAN_CHANNEL_0, CSR, ENB, DISABLE,
+ DmaCommandReg);
+ APBDMACHAN_WRITE32(pDmaChRegs->pHwDmaChanReg, CSR, DmaCommandReg);
+ AckNClearApbDmaInterrupt(pDmaChRegs);
+}
+
+
+/**
+ * Tells whether the given address is valid peripheral device address or not.
+ *
+ * @return NV_TRUE if the address lies in the 0x7XXXXXXX aperture (the only
+ *         valid device address range on this chip), NV_FALSE otherwise.
+ */
+NvBool NvRmPrivDmaHwIsValidPeripheralAddress(NvRmPhysAddr PhysAddress)
+{
+ NvU32 Address32Bit;
+ NvU32 MostSignificantNibble;
+
+ // Get the most significant nibble
+ Address32Bit = (NvU32)PhysAddress;
+ MostSignificantNibble = Address32Bit >> 28;
+
+ // Only address start at 7XXX:XXXX address are the valid device address.
+ if (MostSignificantNibble == 7)
+ return NV_TRUE;
+ return NV_FALSE;
+}
+
+/**
+ * Populate the APB DMA dispatch table with the implementations in this
+ * file, then let the AP15-specific layer install/override its entries.
+ */
+void NvRmPrivDmaInitDmaHwInterfaces(DmaHwInterface *pApbDmaInterface)
+{
+ pApbDmaInterface->DmaHwGlobalSetFxn = GlobalSetApbDma;
+ pApbDmaInterface->DmaHwConfigureAddressFxn = ConfigureApbDmaAddress;
+ pApbDmaInterface->DmaHwSetTransferSizeFxn = SetApbDmaTransferSize;
+ pApbDmaInterface->DmaHwGetTransferredCountFxn = GetApbDmaTransferredCount;
+ pApbDmaInterface->DmaHwGetTransferredCountWithStopFxn = GetApbDmaTransferredCountWithStop;
+ pApbDmaInterface->DmaHwAddTransferCountFxn = AddApbDmaTransferredCount;
+ pApbDmaInterface->DmaHwSetBurstSizeFxn = SetDmaBurstSize;
+ pApbDmaInterface->DmaHwEnableDestBitSwapFxn = EnableApbDmaDestBitSwap;
+ pApbDmaInterface->DmaHwSetAddressWrappingFxn = SetApbDmaAddressWrapping;
+ pApbDmaInterface->DmaHwStartTransferFxn = StartApbDmaTransfer;
+ pApbDmaInterface->DmaHwContinueTransferFxn = ContinueApbDmaTransfer;
+ pApbDmaInterface->DmaHwStartTransferWithAddIncFxn = StartApbDmaWithAddInc;
+ pApbDmaInterface->DmaHwStopTransferFxn = StopApbDmaTransfer;
+ pApbDmaInterface->DmaHwIsTransferCompletedFxn = IsApbDmaTransferCompleted;
+ pApbDmaInterface->DmaHwAckNClearInterruptFxn = AckNClearApbDmaInterrupt;
+
+ NvRmPrivDmaInitAp15DmaHwInterfaces(pApbDmaInterface);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.h b/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.h
new file mode 100644
index 000000000000..aa7b4175d8e2
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_dma_hw_private.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Private functions for the dma resource manager</b>
+ *
+ * @b Description: Defines the HW access of the apb dma register.
+ *
+ */
+
+#ifndef INCLUDED_NVRM_DMA_HW_PRIVATE_H
+#define INCLUDED_NVRM_DMA_HW_PRIVATE_H
+
+/**
+ * @defgroup nvrm_dma Direct Memory Access (DMA) Hw controller interface API.
+ *
+ * This is the Hw Dma controller interface. These API provides the register
+ * access of the dma controller register. This configures the hw related dma
+ * information in the passed parameters.
+ *
+ * @ingroup nvddk_rm
+ *
+ * @{
+ */
+
+#include "nvcommon.h"
+#include "nvrm_dma.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Combines the apb dma registers' physical base address, the channel
+ * address, the bank size of the channel address, and the general
+ * controller address.
+ */
+typedef struct
+{
+ NvU32 GenAddBankSize;
+ NvU32 *pGenVirtBaseAdd;
+} DmaGenRegisters;
+
+
+/**
+ * Combines the Dma register which contains the APB register sets.
+ * Per-channel shadow copies of the controller registers plus the running
+ * transfer byte count; the shadows are written to hardware via
+ * pHwDmaChanReg when a transfer is programmed.
+ */
+typedef struct
+{
+    // Virtual address Pointer to the dma channel base register.
+    NvU32 *pHwDmaChanReg;
+
+    // Shadow values for the channel's control/status/address/sequence
+    // registers (one per hardware register of the channel).
+    NvU32 ControlReg;
+    NvU32 StatusReg;
+    NvU32 AhbAddressReg;
+    NvU32 ApbAddressReg;
+    NvU32 XmbAddressReg;
+    NvU32 AhbSequenceReg;
+    NvU32 XmbSequenceReg;
+    NvU32 ApbSequenceReg;
+
+    // Accumulated transferred count for this channel.
+    // (Field name keeps the original "Transfered" spelling: public users
+    // of this struct reference it by this name.)
+    NvU32 TransferedCount;
+} DmaChanRegisters;
+
+
+typedef struct DmaHwInterfaceRec
+{
+ /**
+ * Configure the Apb dma register as per clients information.
+ * This function do the register setting based on device Id and will be stored
+ * in the dma handle. This information will be used when there is dma transfer
+ * request and want to configure the dma controller registers.
+ */
+ void
+ (*DmaHwInitRegistersFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmDmaModuleID DmaReqModuleId,
+ NvU32 DmaReqInstId);
+
+ /**
+ * Global Enable/disable the dma controller.
+ */
+ void (*DmaHwGlobalSetFxn)(NvU32 *pGenVirtBaseAddress, NvBool IsEnable);
+
+ /**
+ * Continue the remaining transfer.
+ */
+ void (*DmaContinueRemainingTransferFxn)(void *pDmaChannel);
+
+ NvError (*LogDmaTransferRequestFxn)(NvRmDmaHandle hDma, void *pCurrRequest);
+
+ /**
+ * Configure the address registers of the dma from the client buffer
+ * source and destination address.
+ */
+ void
+ (*DmaHwConfigureAddressFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmPhysAddr SourceAdd,
+ NvRmPhysAddr DestAdd,
+ NvBool IsSourceAddPeripheralXmbType);
+
+ /**
+ * Set the data transfer size for the apb dma.
+ */
+ void
+ (*DmaHwSetTransferSizeFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvU32 TransferSize,
+ NvBool IsDoubleBuffMode);
+
+ /**
+ * Get the transferred count for apb dma.
+ */
+ NvU32 (*DmaHwGetTransferredCountFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Get the transferred count for apb dma.
+ */
+ NvU32 (*DmaHwGetTransferredCountWithStopFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsTransferStop);
+
+ /**
+ * Add the transfer count in the dma transferred size.
+ */
+ void (*DmaHwAddTransferCountFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Set the transferred mode for apb dma.
+ */
+ void
+ (*DmaHwSetTransferModeFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsContinuousMode,
+ NvBool IsDoubleBuffMode);
+
+ /**
+ * Set the dma direction of data transfer.
+ */
+ void
+ (*DmaHwSetDirectionFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsSourceAddPerXmbType);
+
+ /**
+ * Set the dma burst size in the dma registers copy.
+ */
+ void
+ (*DmaHwSetBurstSizeFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmDmaModuleID DmaReqModuleId,
+ NvU32 TransferSize);
+
+    /**
+     * Enable the bit swap for the destination address for apb dma.
+     */
+ void
+ (*DmaHwEnableDestBitSwapFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvBool IsDestAddPeripheralXmbType);
+
+ /**
+ * Set the address wrapping information for dma.
+ * The different address wrapping is supported by dma.
+ */
+ void
+ (*DmaHwSetAddressWrappingFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvRmPhysAddr SourceAddWrap,
+ NvRmPhysAddr DestAddWrap,
+ NvU32 TransferSize,
+ NvBool IsSourceAddPeripheralXmbType);
+
+ /**
+ * Start the dma transfer from the current request.
+ * This will start the dma transfer.
+ */
+ void (*DmaHwStartTransferFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Continue the dma transfer special for the continuous mode.
+ */
+ void (*DmaHwContinueTransferFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Start the dma transfer from the current request. This will read the
+ * current configured address from the register and increment them as per
+ * passed parameter and start the dma transfer.
+ */
+ void
+ (*DmaHwStartTransferWithAddIncFxn)(
+ DmaChanRegisters *pDmaChRegs,
+ NvU32 XmbPeriAddIncSize,
+ NvU32 MemoryAddIncSize,
+ NvU32 IsContMode);
+
+ /**
+ * Stop the data transfer in the given dma channel number.
+ */
+ void (*DmaHwStopTransferFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Check whether the dma transfer is completed or not for the given channel.
+ */
+ NvBool (*DmaHwIsTransferCompletedFxn)(DmaChanRegisters *pDmaChRegs);
+
+ /**
+ * Ack and clear the interrupt of dma channel.
+ */
+ void (*DmaHwAckNClearInterruptFxn)(DmaChanRegisters *pDmaChRegs);
+} DmaHwInterface;
+
+
+
+/**
+ * Tells whether the given address is on Xmb or not.
+ */
+NvBool NvRmPrivDmaHwIsValidXmbAddress(NvRmPhysAddr PhysAddress);
+
+/**
+ * Tells whether the given address is valid peripheral device address or not.
+ */
+NvBool NvRmPrivDmaHwIsValidPeripheralAddress(NvRmPhysAddr PhysAddress) ;
+
+
+void NvRmPrivDmaInitAp15DmaHwInterfaces(DmaHwInterface *pApbDmaInterface);
+
+void NvRmPrivDmaInitDmaHwInterfaces(DmaHwInterface *pApbDmaInterface);
+
+NvU32 NvRmPrivDmaInterruptDecode(NvRmDeviceHandle hRmDevice );
+
+void NvRmPrivDmaInterruptEnable(NvRmDeviceHandle hRmDevice, NvU32 Channel, NvBool Enable );
+
+#if defined(__cplusplus)
+}
+#endif
+
+/** @} */
+
+#endif // INCLUDED_NVRM_DMA_HW_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_hw_private.c
new file mode 100644
index 000000000000..2d4a5e35bf09
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_hw_private.c
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Private functions implementation for the spi Ddk driver</b>
+ *
+ * @b Description: Implements the private functions for the spi hw interface.
+ *
+ */
+
+#include "rm_spi_slink_hw_private.h"
+#include "nvrm_drf.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+
+// hardware includes
+#include "ap15/arspi.h"
+
+#define SPI_REG_READ32(pSpiHwRegsVirtBaseAdd, reg) \
+ NV_READ32((pSpiHwRegsVirtBaseAdd) + ((SPI_##reg##_0)/4))
+#define SPI_REG_WRITE32(pSpiHwRegsVirtBaseAdd, reg, val) \
+ do { \
+ NV_WRITE32((((pSpiHwRegsVirtBaseAdd) + ((SPI_##reg##_0)/4))), (val)); \
+ } while (0)
+
+#define MAX_SPI_FIFO_DEPTH 4
+
+#define RESET_ALL_CS \
+ (NV_DRF_DEF(SPI, COMMAND, CS0_EN, ENABLE) | \
+ NV_DRF_DEF(SPI, COMMAND, CS1_EN, ENABLE) | \
+ NV_DRF_DEF(SPI, COMMAND, CS2_EN, ENABLE) | \
+ NV_DRF_DEF(SPI, COMMAND, CS3_EN, ENABLE))
+
+#define ALL_SPI_STATUS_CLEAR \
+ (NV_DRF_NUM(SPI, STATUS, RDY, 1) | \
+ NV_DRF_NUM(SPI, STATUS, RXF_UNR, 1) | \
+ NV_DRF_NUM(SPI, STATUS, TXF_OVF, 1))
+
+static void
+SpiHwSetSignalMode(
+ SerialHwRegisters *pSpiHwRegs,
+ NvOdmQuerySpiSignalMode SignalMode);
+
+/**
+ * Initialize the software state and register shadows for one spi
+ * controller instance: default transfer parameters, reset values for
+ * COMMAND/STATUS/DMA_CTL, and software-controlled chip select.
+ * Note: pRegsBaseAdd is left NULL here; the caller must map and set the
+ * register base before any SPI_REG_* access.
+ */
+static void
+SpiHwRegisterInitialize(
+    NvU32 SerialInstanceId,
+    SerialHwRegisters *pSpiHwRegs)
+{
+    NvU32 CommandReg;
+    pSpiHwRegs->InstanceId = SerialInstanceId;
+    pSpiHwRegs->pRegsBaseAdd = NULL;
+    pSpiHwRegs->RegBankSize = 0;
+    pSpiHwRegs->HwTxFifoAdd = SPI_TX_FIFO_0;
+    pSpiHwRegs->HwRxFifoAdd = SPI_RX_FIFO_0;
+    pSpiHwRegs->IsPackedMode = NV_FALSE;
+    pSpiHwRegs->PacketLength = 1;
+    pSpiHwRegs->CurrSignalMode = NvOdmQuerySpiSignalMode_Invalid;
+    pSpiHwRegs->MaxWordTransfer = MAX_SPI_FIFO_DEPTH;
+    pSpiHwRegs->IsLsbFirst = NV_FALSE;
+    pSpiHwRegs->IsMasterMode = NV_TRUE;
+    pSpiHwRegs->IsNonWordAlignedPackModeSupported = NV_TRUE;
+
+    CommandReg = NV_RESETVAL(SPI, COMMAND);
+    // Initialize the chip select bits to select the s/w only
+    CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CS_SOFT, 1, CommandReg);
+    CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CS_VAL, 1, CommandReg);
+
+    // NOTE(review): IsIdleDataOutHigh is read here but never assigned in
+    // this function -- it must be set by the caller before this runs,
+    // otherwise an uninitialized field decides the idle SDA level. Verify.
+    if (pSpiHwRegs->IsIdleDataOutHigh)
+        CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SDA, DRIVE_HIGH, CommandReg);
+    else
+        CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SDA, DRIVE_LOW, CommandReg);
+
+    pSpiHwRegs->HwRegs.SpiRegs.Command = CommandReg;
+    pSpiHwRegs->HwRegs.SpiRegs.Status = NV_RESETVAL(SPI, STATUS);
+    pSpiHwRegs->HwRegs.SpiRegs.DmaControl = NV_RESETVAL(SPI, DMA_CTL);
+}
+
+/**
+ * Push the shadowed COMMAND register value to the controller hardware.
+ */
+static void SpiHwControllerInitialize(SerialHwRegisters *pSpiHwRegs)
+{
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, COMMAND,
+        pSpiHwRegs->HwRegs.SpiRegs.Command);
+}
+
+/**
+ * Set the functional mode whether this is the master or slave mode.
+ * This spi controller only supports master mode; requesting slave mode
+ * asserts (no-op in release builds).
+ */
+static void
+SpiHwSetFunctionalMode(
+    SerialHwRegisters *pSpiHwRegs,
+    NvBool IsMasterMode)
+{
+    // Slave mode is not supported.
+    if (!IsMasterMode)
+        NV_ASSERT(!"Not Supported");
+}
+
+
+/**
+ * Flush (reset) the requested rx/tx fifo(s) and busy-wait until the
+ * controller reports the flush has completed.
+ *
+ * @param pSpiHwRegs Controller register state (pRegsBaseAdd must be mapped).
+ * @param FifoType   Bitmask of SerialHwFifo_Rx / SerialHwFifo_Tx to flush.
+ */
+static void
+SpiHwResetFifo(
+    SerialHwRegisters *pSpiHwRegs,
+    SerialHwFifo FifoType)
+{
+    NvU32 ResetBits = 0;
+
+    NvU32 StatusReg = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+    if (FifoType & SerialHwFifo_Rx)
+        ResetBits = NV_DRF_NUM(SPI, STATUS, RXF_FLUSH, 1);
+    // BUGFIX: the Tx branch previously set RXF_FLUSH again (copy-paste
+    // error), so a Tx-only flush actually flushed the Rx fifo and the
+    // poll below never waited on the Tx flush bit.
+    if (FifoType & SerialHwFifo_Tx)
+        ResetBits |= NV_DRF_NUM(SPI, STATUS, TXF_FLUSH, 1);
+
+    StatusReg |= ResetBits;
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, STATUS, StatusReg);
+
+    // The controller clears each flush bit when the corresponding fifo
+    // is empty; poll until all requested flushes have completed.
+    StatusReg = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+    while (StatusReg & ResetBits)
+    {
+        StatusReg = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+    }
+}
+
+/**
+ * Report whether the transmit fifo is full (no room for another word).
+ */
+static NvBool SpiHwIsTransmitFifoFull(SerialHwRegisters *pSpiHwRegs)
+{
+    NvU32 Status = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+
+    return (Status & NV_DRF_DEF(SPI, STATUS, TXF_FULL, FULL)) ?
+                NV_TRUE : NV_FALSE;
+}
+
+
+/**
+ * Set the signal mode of communication whether this is the mode 0, 1, 2 or 3.
+ * ACTIVE_SCLK selects the idle clock level and CK_SDA the data/clock
+ * relationship; together they map to the standard spi modes 0..3.
+ * The new mode is written to hardware and cached in CurrSignalMode.
+ */
+static void
+SpiHwSetSignalMode(
+    SerialHwRegisters *pSpiHwRegs,
+    NvOdmQuerySpiSignalMode SignalMode)
+{
+    NvU32 CommandReg;
+    CommandReg = pSpiHwRegs->HwRegs.SpiRegs.Command;
+    switch (SignalMode)
+    {
+        // Mode 0: clock idles low, CK_SDA = 0.
+        case NvOdmQuerySpiSignalMode_0:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SCLK,
+                                        DRIVE_LOW, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CK_SDA, 0,
+                                        CommandReg);
+            break;
+
+        // Mode 1: clock idles low, CK_SDA = 1.
+        case NvOdmQuerySpiSignalMode_1:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SCLK,
+                                        DRIVE_LOW, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CK_SDA, 1,
+                                        CommandReg);
+            break;
+
+        // Mode 2: clock idles high, CK_SDA = 0.
+        case NvOdmQuerySpiSignalMode_2:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SCLK,
+                                        DRIVE_HIGH, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CK_SDA, 0,
+                                        CommandReg);
+            break;
+        // Mode 3: clock idles high, CK_SDA = 1.
+        case NvOdmQuerySpiSignalMode_3:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, ACTIVE_SCLK,
+                                        DRIVE_HIGH, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CK_SDA, 1,
+                                        CommandReg);
+            break;
+        default:
+            NV_ASSERT(!"Invalid SignalMode");
+    }
+    pSpiHwRegs->HwRegs.SpiRegs.Command = CommandReg;
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, COMMAND, CommandReg);
+    pSpiHwRegs->CurrSignalMode = SignalMode;
+}
+
+/**
+ * Set the transfer order whether the bit will start from the lsb or from
+ * msb. This controller only shifts msb-first; requesting lsb-first
+ * asserts (no-op in release builds).
+ */
+static void
+SpiHwSetTransferBitOrder(
+    SerialHwRegisters *pSpiHwRegs,
+    NvBool IsLsbFirst)
+{
+    // This feature is not supported on the spi controller.
+    if (IsLsbFirst)
+        NV_ASSERT(!"Not Supported");
+}
+
+/**
+ * Start the transfer of the communication by setting DMA_EN in DMA_CTL.
+ * The COMMAND register is rewritten first only when the caller indicates
+ * it was reconfigured since the last transfer.
+ */
+static void SpiHwStartTransfer(SerialHwRegisters *pSpiHwRegs, NvBool IsReconfigure)
+{
+    NvU32 DmaControlReg = pSpiHwRegs->HwRegs.SpiRegs.DmaControl;
+
+    // Enable the dma bit in the register variable only
+    DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, DMA_EN, ENABLE, DmaControlReg);
+
+    // Now write the command and dma control values into the controller register
+
+    // Need to write on the command register only if the reconfiguration is done.
+    // Other wise it is not required.
+    if (IsReconfigure)
+        SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, COMMAND,
+                        pSpiHwRegs->HwRegs.SpiRegs.Command);
+
+    // Writing DMA_CTL with DMA_EN set kicks off the transfer.
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+}
+
+/**
+ * Enable/disable the data transfer flow.
+ * Sets or clears RXEN/TXEN in the COMMAND register for each direction
+ * present in the DataFlow bitmask, then writes the result to hardware.
+ */
+static void
+SpiHwSetDataFlow(
+SerialHwRegisters *pSerialHwRegs,
+    SerialHwDataFlow DataFlow,
+    NvBool IsEnable)
+{
+    NvU32 CommandReg = pSerialHwRegs->HwRegs.SpiRegs.Command;
+    if (DataFlow & SerialHwDataFlow_Rx)
+    {
+        if (IsEnable)
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, RXEN,
+                                ENABLE, CommandReg);
+        else
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, RXEN,
+                                DISABLE, CommandReg);
+    }
+
+    if (DataFlow & SerialHwDataFlow_Tx)
+    {
+        if (IsEnable)
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, TXEN,
+                                ENABLE, CommandReg);
+        else
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, TXEN,
+                                DISABLE, CommandReg);
+    }
+    pSerialHwRegs->HwRegs.SpiRegs.Command = CommandReg;
+    SPI_REG_WRITE32(pSerialHwRegs->pRegsBaseAdd, COMMAND,
+                pSerialHwRegs->HwRegs.SpiRegs.Command);
+}
+
+/**
+ * Set the chip select signal level to be default based on device during the
+ * initialization. Intentionally a no-op: this controller has no control
+ * over the individual cs lines.
+ */
+static void
+SpiHwSetChipSelectDefaultLevelFxn(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    // No control over the individual cs lines.
+}
+
+/**
+ * Set the chip select signal level.
+ * Clears all CSx_EN bits, programs CS_VAL, then enables only the
+ * requested chip select line.
+ */
+static void
+SpiHwSetChipSelectLevel(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    NvU32 CommandReg = pSpiHwRegs->HwRegs.SpiRegs.Command;
+
+    // Clear all chipselect
+    CommandReg &= ~(RESET_ALL_CS);
+
+    // Set the chip select level.
+    // NOTE(review): CS_VAL is written with the inverse of IsHigh
+    // (IsHigh -> 0). Presumably CS_VAL encodes the inactive/idle level of
+    // the line rather than the asserted level -- confirm against the TRM.
+    if (IsHigh)
+        CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CS_VAL, 0, CommandReg);
+    else
+        CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, CS_VAL, 1, CommandReg);
+
+    switch (ChipSelectId)
+    {
+        case 0:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, CS0_EN, ENABLE,
+                                                CommandReg);
+            break;
+        case 1:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, CS1_EN, ENABLE,
+                                                CommandReg);
+            break;
+        case 2:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, CS2_EN, ENABLE,
+                                                CommandReg);
+            break;
+        case 3:
+            CommandReg = NV_FLD_SET_DRF_DEF(SPI, COMMAND, CS3_EN, ENABLE,
+                                                CommandReg);
+            break;
+        default:
+            NV_ASSERT(!"Invalid ChipSelectId");
+    }
+    pSpiHwRegs->HwRegs.SpiRegs.Command = CommandReg;
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, COMMAND, CommandReg);
+}
+
+/**
+ * Set the packet length and packed mode.
+ * BIT_LENGTH holds (PacketLength - 1); packed mode is toggled via the
+ * PACKED bit of DMA_CTL. Both hardware registers and the local shadows
+ * are updated.
+ */
+static void
+SpiHwSetPacketLength(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 PacketLength,
+    NvBool IsPackedMode)
+{
+    NvU32 CommandReg = pSpiHwRegs->HwRegs.SpiRegs.Command;
+    NvU32 DmaControlReg = pSpiHwRegs->HwRegs.SpiRegs.DmaControl;
+
+    CommandReg = NV_FLD_SET_DRF_NUM(SPI, COMMAND, BIT_LENGTH,
+                        (PacketLength -1), CommandReg);
+    if (IsPackedMode)
+        DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, PACKED, ENABLE,
+                                DmaControlReg);
+    else
+        DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, PACKED, DISABLE,
+                                DmaControlReg);
+
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, COMMAND, CommandReg);
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+
+    pSpiHwRegs->HwRegs.SpiRegs.Command = CommandReg;
+    pSpiHwRegs->HwRegs.SpiRegs.DmaControl = DmaControlReg;
+    pSpiHwRegs->PacketLength = PacketLength;
+    pSpiHwRegs->IsPackedMode = IsPackedMode;
+}
+
+/**
+ * Set the Dma transfer size.
+ * DMA_BLOCK_SIZE holds (DmaBlockSize - 1); the shadow and the hardware
+ * register are both updated.
+ */
+static void
+SpiHwSetDmaTransferSize(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 DmaBlockSize)
+{
+    pSpiHwRegs->HwRegs.SpiRegs.DmaControl =
+        NV_FLD_SET_DRF_NUM(SPI, DMA_CTL, DMA_BLOCK_SIZE, (DmaBlockSize-1),
+            pSpiHwRegs->HwRegs.SpiRegs.DmaControl);
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, DMA_CTL, pSpiHwRegs->HwRegs.SpiRegs.DmaControl);
+}
+
+/**
+ * Return the number of packets programmed for the dma transfer.
+ * The DMA_BLOCK_SIZE field of the shadowed DMA_CTL register holds
+ * (count - 1), so add one back.
+ */
+static NvU32 SpiHwGetTransferdCount(SerialHwRegisters *pSpiHwRegs)
+{
+    NvU32 CtlShadow = pSpiHwRegs->HwRegs.SpiRegs.DmaControl;
+
+    return NV_DRF_VAL(SPI, DMA_CTL, DMA_BLOCK_SIZE, CtlShadow) + 1;
+}
+
+/**
+ * Set the trigger level.
+ * Only trigger levels of 4 bytes (TRIG1, one word) and 16 bytes
+ * (TRIG4, four words) are supported; anything else asserts.
+ */
+static void
+SpiHwSetTriggerLevel(
+    SerialHwRegisters *pSpiHwRegs,
+    SerialHwFifo FifoType,
+    NvU32 TriggerLevel)
+{
+    NvU32 DmaControlReg = pSpiHwRegs->HwRegs.SpiRegs.DmaControl;
+    switch(TriggerLevel)
+    {
+        // 4 bytes -> trigger on 1 fifo word.
+        case 4:
+            if (FifoType & SerialHwFifo_Rx)
+                DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, RX_TRIG, TRIG1,
+                                            DmaControlReg);
+            if (FifoType & SerialHwFifo_Tx)
+                DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, TX_TRIG, TRIG1,
+                                            DmaControlReg);
+            break;
+
+        // 16 bytes -> trigger on 4 fifo words.
+        case 16:
+            if (FifoType & SerialHwFifo_Rx)
+                DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, RX_TRIG, TRIG4,
+                                            DmaControlReg);
+            if (FifoType & SerialHwFifo_Tx)
+                DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, TX_TRIG, TRIG4,
+                                            DmaControlReg);
+            break;
+        default:
+            NV_ASSERT(!"Invalid Triggerlevel");
+    }
+    pSpiHwRegs->HwRegs.SpiRegs.DmaControl = DmaControlReg;
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, DMA_CTL, DmaControlReg);
+}
+
+/**
+ * Copy words from pTxBuff into the transmit fifo, capped at one fifo
+ * depth (MAX_SPI_FIFO_DEPTH words).
+ * Returns the number of words actually queued.
+ */
+static NvU32
+SpiHwWriteInTransmitFifo(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 *pTxBuff,
+    NvU32 WordRequested)
+{
+    NvU32 WordsToWrite = NV_MIN(WordRequested, MAX_SPI_FIFO_DEPTH);
+    NvU32 Index;
+
+    for (Index = 0; Index < WordsToWrite; ++Index)
+        SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, TX_FIFO, pTxBuff[Index]);
+
+    return WordsToWrite;
+}
+
+/**
+ * Drain WordRequested words from the receive fifo into pRxBuff.
+ * Returns the number of words read (always WordRequested; the caller is
+ * responsible for requesting no more than the fifo holds).
+ */
+static NvU32
+SpiHwReadFromReceiveFifo(
+    SerialHwRegisters *pSpiHwRegs,
+    NvU32 *pRxBuff,
+    NvU32 WordRequested)
+{
+    NvU32 Index;
+
+    for (Index = 0; Index < WordRequested; ++Index)
+        pRxBuff[Index] = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, RX_FIFO);
+
+    return WordRequested;
+}
+
+/**
+ * Enable/disable the interrupt source.
+ * Updates IE_RXC / IE_TXC in the DMA_CTL shadow only; the new value is
+ * written to hardware later (e.g. by SpiHwStartTransfer), not here.
+ */
+static void
+SpiHwSetInterruptSource(
+    SerialHwRegisters *pSpiHwRegs,
+    SerialHwDataFlow DataDirection,
+    NvBool IsEnable)
+{
+    NvU32 DmaControlReg = pSpiHwRegs->HwRegs.SpiRegs.DmaControl;
+    if (DataDirection & SerialHwDataFlow_Rx)
+    {
+        if (IsEnable)
+        {
+            DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, IE_RXC,
+                                    ENABLE, DmaControlReg);
+        }
+        else
+        {
+            DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, IE_RXC,
+                                    DISABLE, DmaControlReg);
+        }
+    }
+
+    if (DataDirection & SerialHwDataFlow_Tx)
+    {
+        if (IsEnable)
+        {
+            DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, IE_TXC,
+                                    ENABLE, DmaControlReg);
+        }
+        else
+        {
+            DmaControlReg = NV_FLD_SET_DRF_DEF(SPI, DMA_CTL, IE_TXC,
+                                    DISABLE, DmaControlReg);
+        }
+    }
+
+    pSpiHwRegs->HwRegs.SpiRegs.DmaControl = DmaControlReg;
+}
+
+/**
+ * Get the transfer status.
+ * Reads the STATUS register, latches it into the spi register shadow,
+ * and returns an error if a fifo underrun/overflow occurred in the
+ * requested data-flow direction(s).
+ */
+static NvError SpiHwGetTransferStatus(SerialHwRegisters *pSpiHwRegs,
+                    SerialHwDataFlow DataFlow)
+{
+    NvU32 StatusReg = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+
+    // BUGFIX: shadow into SpiRegs.Status, not SlinkRegs.Status -- this is
+    // the spi (non-slink) driver, and SpiHwClearTransferStatus() reads the
+    // shadow back from SpiRegs.Status.
+    pSpiHwRegs->HwRegs.SpiRegs.Status = StatusReg;
+    // Check for the receive error (fifo underrun).
+    if (DataFlow & SerialHwDataFlow_Rx)
+    {
+        if (StatusReg & NV_DRF_NUM(SPI, STATUS, RXF_UNR, 1))
+            return NvError_SpiReceiveError;
+    }
+
+    // Check for the transmit error (fifo overflow).
+    if (DataFlow & SerialHwDataFlow_Tx)
+    {
+        if (StatusReg & NV_DRF_NUM(SPI, STATUS, TXF_OVF, 1))
+            return NvError_SpiTransmitError;
+    }
+    return NvSuccess;
+}
+
+/**
+ * Clear the sticky transfer-status bits (write-1-to-clear) for the
+ * requested data-flow direction(s), plus the ready bit.
+ * NOTE(review): this reads the shadow from SpiRegs.Status while
+ * SpiHwGetTransferStatus() stores it into SlinkRegs.Status -- presumably
+ * HwRegs is a union so they alias, but verify.
+ */
+static void SpiHwClearTransferStatus(SerialHwRegisters *pSpiHwRegs,
+                    SerialHwDataFlow DataFlow)
+{
+    NvU32 StatusReg = pSpiHwRegs->HwRegs.SpiRegs.Status ;
+
+    // Clear all the write 1 on clear status.
+    StatusReg &= (~ALL_SPI_STATUS_CLEAR);
+
+    // Make ready clear to 1.
+    StatusReg = NV_FLD_SET_DRF_NUM(SPI, STATUS, RDY, 1, StatusReg);
+
+    // Check for the receive error
+    if (DataFlow & SerialHwDataFlow_Rx)
+        StatusReg |= NV_DRF_NUM(SPI, STATUS, RXF_UNR, 1);
+
+    // Check for the transmit error
+    if (DataFlow & SerialHwDataFlow_Tx)
+        StatusReg |= NV_DRF_NUM(SPI, STATUS, TXF_OVF, 1);
+
+    // Write back to the spi status register to clear the selected bits.
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, STATUS, StatusReg);
+}
+
+/**
+ * Check whether transfer is completed or not.
+ * Returns NV_FALSE while the controller is busy; once idle, it also
+ * acknowledges completion by writing 1 to the ready bit (side effect).
+ */
+static NvBool SpiHwIsTransferCompleted(SerialHwRegisters *pSpiHwRegs)
+{
+    // Read the Status register
+    NvU32 StatusReg = SPI_REG_READ32(pSpiHwRegs->pRegsBaseAdd, STATUS);
+
+    if (StatusReg & NV_DRF_DEF(SPI, STATUS, BSY, BUSY))
+        return NV_FALSE;
+
+    // Transfer is completed so clear the ready bit by write 1 to clear
+    // Clear all the write 1 on clear status.
+    StatusReg &= (~ALL_SPI_STATUS_CLEAR);
+
+    // Make ready clear to 1.
+    StatusReg = NV_FLD_SET_DRF_NUM(SPI, STATUS, RDY, 1, StatusReg);
+
+    SPI_REG_WRITE32(pSpiHwRegs->pRegsBaseAdd, STATUS, StatusReg);
+
+    return NV_TRUE;
+}
+
+/**
+ * Initialize the spi interface for the hw access.
+ * Fills the shared spi/slink HwInterface function table with the
+ * spi-controller implementations defined in this file.
+ */
+void NvRmPrivSpiSlinkInitSpiInterface(HwInterface *pSpiInterface)
+{
+    pSpiInterface->HwRegisterInitializeFxn = SpiHwRegisterInitialize;
+    pSpiInterface->HwControllerInitializeFxn = SpiHwControllerInitialize;
+    pSpiInterface->HwSetFunctionalModeFxn = SpiHwSetFunctionalMode;
+    pSpiInterface->HwResetFifoFxn = SpiHwResetFifo;
+    pSpiInterface->HwIsTransmitFifoFull = SpiHwIsTransmitFifoFull;
+    pSpiInterface->HwSetSignalModeFxn = SpiHwSetSignalMode;
+    pSpiInterface->HwSetTransferBitOrderFxn = SpiHwSetTransferBitOrder;
+    pSpiInterface->HwStartTransferFxn = SpiHwStartTransfer;
+    pSpiInterface->HwSetDataFlowFxn = SpiHwSetDataFlow;
+    pSpiInterface->HwSetChipSelectDefaultLevelFxn = SpiHwSetChipSelectDefaultLevelFxn;
+    pSpiInterface->HwSetChipSelectLevelFxn = SpiHwSetChipSelectLevel;
+    pSpiInterface->HwSetPacketLengthFxn = SpiHwSetPacketLength;
+    pSpiInterface->HwSetDmaTransferSizeFxn = SpiHwSetDmaTransferSize;
+    pSpiInterface->HwGetTransferdCountFxn = SpiHwGetTransferdCount;
+    pSpiInterface->HwSetTriggerLevelFxn = SpiHwSetTriggerLevel;
+    pSpiInterface->HwWriteInTransmitFifoFxn = SpiHwWriteInTransmitFifo;
+    pSpiInterface->HwReadFromReceiveFifoFxn = SpiHwReadFromReceiveFifo;
+    pSpiInterface->HwSetInterruptSourceFxn = SpiHwSetInterruptSource;
+    pSpiInterface->HwClearTransferStatusFxn = SpiHwClearTransferStatus;
+    pSpiInterface->HwGetTransferStatusFxn = SpiHwGetTransferStatus;
+    pSpiInterface->HwIsTransferCompletedFxn = SpiHwIsTransferCompleted;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.c b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.c
new file mode 100644
index 000000000000..15decda8535c
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.c
@@ -0,0 +1,2932 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>NVIDIA Driver Development Kit:
+ * Spi Driver implementation</b>
+ *
+ * @b Description: Implementation of the NvRm SPI API of the OAL and non-OAL
+ * version.
+ *
+ */
+
+#include "nvrm_spi.h"
+#include "nvrm_power.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_dma.h"
+#include "nvodm_query.h"
+#include "rm_spi_slink_hw_private.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvodm_modules.h"
+#include "rm_spi_slink.h"
+#include "nvrm_priv_ap_general.h"
+
+
+// Combined maximum of spi and slink controllers
+#define MAX_SPI_SLINK_INSTANCE (MAX_SLINK_CONTROLLERS + MAX_SPI_CONTROLLERS)
+
+// Constant used to size arrays for the maximum chipselects available
+// per spi/slink channel.
+#define MAX_CHIPSELECT_PER_INSTANCE 4
+
+// The maximum slave size request in words. Maximum 64KB/64K packet
+#define MAXIMUM_SLAVE_TRANSFER_WORD (1 << (16-2))
+
+// Maximum number which is returned by NvOsGetTimeMS().
+// For NV_OAL, NvOsGetTimeMS() returns the MicroSecond Timer count divided by 1000,
+// and the microsecond timer has a maximum count of 0xFFFFFFFF.
+// For Non-NV_OAL, it returns a maximum of 0xFFFFFFFF.
+#if NV_OAL
+#define MAX_TIME_IN_MS (0xFFFFFFFF/1000)
+#else
+#define MAX_TIME_IN_MS 0xFFFFFFFF
+#endif
+
+// The maximum request size for one transaction using the dma
+enum {DEFAULT_DMA_BUFFER_SIZE = (0x4000)}; // 16KB
+
+// Maximum buffer size when transferring the data using the cpu.
+enum {MAX_CPU_TRANSACTION_SIZE_WORD = 0x80}; // 256 bytes
+
+// Maximum number of non dma transfers for which an allocated apb dma is held
+enum {MAX_DMA_HOLD_TIME = 16}; // Maximum 16 non dma transactions
+
+
+// The maximum number of words up to which the polling method is selected when
+// a cpu based transaction is used.
+enum {SLINK_POLLING_HIGH_THRESOLD = 64};
+
+// The dma buffer alignment requirement.
+enum {DMA_BUFFER_ALIGNMENT = 0x10};
+
+// Combines the details of the transfer currently in progress.
+typedef struct
+{
+ NvU32 *pTxBuff;
+ NvU32 *pRxBuff;
+
+ NvU32 BytesPerPacket;
+ NvU32 PacketBitLength;
+ NvBool IsPackedMode;
+
+ NvU32 PacketsPerWord;
+ NvU32 PacketRequested;
+ NvU32 PacketTransferred;
+ NvU32 TotalPacketsRemaining;
+
+ NvU32 RxPacketsRemaining;
+ NvU32 TxPacketsRemaining;
+
+ NvU32 CurrPacketCount;
+} TransferBufferInfo;
+
+/**
+ * Combines the spi/slink channel information.
+ */
+typedef struct NvRmSpiRec
+{
+ // Nv Rm device handles.
+ NvRmDeviceHandle hDevice;
+
+ // Instance Id
+ NvU32 InstanceId;
+
+ // Is opened in master mode or slave mode.
+ NvBool IsMasterMode;
+
+ // Rm module Id for the reference.
+ NvRmModuleID RmModuleId;
+
+ // Rm IO module Id for the reference.
+ NvOdmIoModule RmIoModuleId;
+
+ // Tells whether this is the spi channel or not.
+ NvBool IsSpiChannel;
+
+ // The channel open count.
+ NvU32 OpenCount;
+
+ // Spi hw register information.
+ SerialHwRegisters HwRegs;
+
+ // Current chipselect id on which data transfer is going on.
+ NvU32 CurrTransferChipSelId;
+
+ // Synchronous semaphore Id which needs to be signalled on transfer
+ // completion.
+ NvOsSemaphoreHandle hSynchSema;
+
+ // Mutex to access this channel to provide the mutual exclusion.
+ NvOsMutexHandle hChannelAccessMutex;
+
+ // Tells whether the dma mode is supported or not.
+ NvBool IsApbDmaAllocated;
+
+ NvU32 TransCountFromLastDmaUsage;
+
+ // Read dma handle.
+ NvRmDmaHandle hRmRxDma;
+
+ // Write dma handle.
+ NvRmDmaHandle hRmTxDma;
+
+ // Memory handle to create the uncached memory.
+ NvRmMemHandle hRmMemory;
+
+ // Rx Dma buffer physical address.
+ NvRmPhysAddr DmaRxBuffPhysAdd;
+
+ // Tx Dma buffer physical address.
+ NvRmPhysAddr DmaTxBuffPhysAdd;
+
+ // Virtual pointer to the Rx dma buffer.
+ NvU32 *pRxDmaBuffer;
+
+ // Virtual pointer to the Tx dma buffer.
+ NvU32 *pTxDmaBuffer;
+
+ // Current Dma transfer size for the Rx and tx
+ NvU32 DmaBufferSize;
+
+ // Dma request for Tx
+ NvRmDmaClientBuffer TxDmaReq;
+
+ // Dma request for rx
+ NvRmDmaClientBuffer RxDmaReq;
+
+ // Tells whether it is using the apb dma for the transfer or not.
+ NvBool IsUsingApbDma;
+
+ // Buffer which will be used when the cpu does the data receiving.
+ NvU32 *pRxCpuBuffer;
+
+ // Buffer which will be used when the cpu does the data transmitting.
+ NvU32 *pTxCpuBuffer;
+
+ NvU32 CpuBufferSizeInWords;
+
+ // Details of the current transfer information.
+ TransferBufferInfo CurrTransInfo;
+
+ // The data transfer direction.
+ SerialHwDataFlow CurrentDirection;
+
+ // The transfer status for the receive and transmit
+ NvError RxTransferStatus;
+ NvError TxTransferStatus;
+
+ // Currently configured clock frequency
+ NvU32 ClockFreqInKHz;
+
+ NvOdmQuerySpiDeviceInfo DeviceInfo[MAX_CHIPSELECT_PER_INSTANCE];
+
+ NvBool IsCurrentChipSelStateHigh[MAX_CHIPSELECT_PER_INSTANCE];
+
+ NvBool IsChipSelSupported[MAX_CHIPSELECT_PER_INSTANCE];
+
+ HwInterfaceHandle hHwInterface;
+
+ NvU32 RmPowerClientId;
+
+ NvOsInterruptHandle SpiInterruptHandle;
+
+ // Configured pin mux
+ NvU32 SpiPinMap;
+
+ // Idle signal state for the spi channel.
+ NvBool IsIdleSignalTristate;
+
+ // Frequency requirements
+ NvRmDfsBusyHint BusyHints[4];
+
+} NvRmSpi;
+
+/**
+ * Driver-wide spi/slink state: the device handle, the per-instance channel
+ * handle list and the mutex which guards access to that list.
+ */
+typedef struct
+{
+ // Nv Rm device handles.
+ NvRmDeviceHandle hDevice;
+
+ // Pointer to the list of the handles of the spi/slink channels.
+ NvRmSpiHandle hSpiSlinkChannelList[MAX_SPI_SLINK_INSTANCE];
+
+ // Mutex for spi/slink channel information.
+ NvOsMutexHandle hChannelAccessMutex;
+} NvRmPrivSpiSlinkInfo;
+
+typedef struct
+{
+ NvU32 MajorVersion;
+ NvU32 MinorVersion;
+} SlinkCapabilities;
+
+static NvRmPrivSpiSlinkInfo s_SpiSlinkInfo;
+static HwInterface s_SpiHwInterface;
+static HwInterface s_SlinkHwInterface;
+
+/**
+ * Get the interfacing property for the device connected to the given chip select Id.
+ * Returns NV_TRUE when the odm query provides the device info, NV_FALSE otherwise
+ * (in which case *pDeviceInfo is filled with default values).
+ */
+static NvBool
+SpiSlinkGetDeviceInfo(
+ NvBool IsSpiChannel,
+ NvU32 InstanceId,
+ NvU32 ChipSelect,
+ NvOdmQuerySpiDeviceInfo *pDeviceInfo)
+{
+ const NvOdmQuerySpiDeviceInfo *pSpiDevInfo = NULL;
+ NvOdmIoModule OdmModuleName;
+
+ OdmModuleName = (IsSpiChannel)? NvOdmIoModule_Sflash: NvOdmIoModule_Spi;
+ pSpiDevInfo = NvOdmQuerySpiGetDeviceInfo(OdmModuleName, InstanceId, ChipSelect);
+ if (!pSpiDevInfo)
+ {
+ // No device info in odm, so set it to the default state.
+ pDeviceInfo->SignalMode = NvOdmQuerySpiSignalMode_0;
+ pDeviceInfo->ChipSelectActiveLow = NV_TRUE;
+ return NV_FALSE;
+ }
+ pDeviceInfo->SignalMode = pSpiDevInfo->SignalMode;
+ pDeviceInfo->ChipSelectActiveLow = pSpiDevInfo->ChipSelectActiveLow;
+ return NV_TRUE;
+}
+
+/**
+ * Create the dma buffer memory handle: create, allocate (uncached) and pin
+ * the memory; on any failure all partial allocations are released.
+ */
+static NvError
+CreateDmaBufferMemoryHandle(
+ NvRmDeviceHandle hDevice,
+ NvRmMemHandle *phNewMemHandle,
+ NvRmPhysAddr *pNewMemAddr,
+ NvU32 BufferSize)
+{
+ NvError Error = NvSuccess;
+ NvRmMemHandle hNewMemHandle = NULL;
+
+ // Initialize the memory handle with NULL
+ *phNewMemHandle = NULL;
+
+ // Create the memory handle.
+ Error = NvRmMemHandleCreate(hDevice, &hNewMemHandle, BufferSize);
+
+ // Allocates the memory from the sdram
+ if (!Error)
+ Error = NvRmMemAlloc(hNewMemHandle, NULL,
+ 0, DMA_BUFFER_ALIGNMENT,
+ NvOsMemAttribute_Uncached);
+
+ // Pin the memory allocation so that it is not moved by the memory manager.
+ if (!Error)
+ *pNewMemAddr = NvRmMemPin(hNewMemHandle);
+
+ // If error then free the memory allocation and memory handle.
+ if (Error)
+ {
+ NvRmMemHandleFree(hNewMemHandle);
+ hNewMemHandle = NULL;
+ }
+
+ *phNewMemHandle = hNewMemHandle;
+ return Error;
+}
+
+ /**
+ * Destroy the dma buffer memory handle (unpin then free).
+ * Thread safety: Caller responsibility.
+ */
+static void DestroyDmaBufferMemoryHandle(NvRmMemHandle hMemHandle)
+{
+ // Can accept the null parameter. If it is not null then only destroy.
+ if (hMemHandle)
+ {
+ // Unpin the memory allocation.
+ NvRmMemUnpin(hMemHandle);
+
+ // Free the memory handle.
+ NvRmMemHandleFree(hMemHandle);
+ }
+}
+
+/**
+ * Create the dma transfer buffers for the given handles: one allocation of
+ * size 2*OneBufferSize, mapped as two halves (buffer 1 and buffer 2).
+ * Thread safety: Caller responsibility.
+ */
+static NvError
+CreateDmaTransferBuffer(
+ NvRmDeviceHandle hRmDevice,
+ NvRmMemHandle *phRmMemory,
+ NvRmPhysAddr *pBuffPhysAddr1,
+ void **pBuffPtr1,
+ NvRmPhysAddr *pBuffPhysAddr2,
+ void **pBuffPtr2,
+ NvU32 OneBufferSize)
+{
+ NvError Error = NvSuccess;
+ NvRmMemHandle hRmMemory = NULL;
+ NvRmPhysAddr BuffPhysAddr;
+
+ // Reset all the members related to the dma buffer.
+ BuffPhysAddr = 0;
+
+ *phRmMemory = NULL;
+ *pBuffPtr1 = (void *)NULL;
+ *pBuffPhysAddr1 = 0;
+ *pBuffPtr2 = (void *)NULL;
+ *pBuffPhysAddr2 = 0;
+
+ // Create the dma buffer memory for receive and transmit.
+ // It will be double of the OneBufferSize
+ Error = CreateDmaBufferMemoryHandle(hRmDevice, &hRmMemory, &BuffPhysAddr,
+ (OneBufferSize <<1));
+ if (!Error)
+ {
+ // 0 to OneBufferSize-1 is buffer 1 and OneBufferSize to 2*OneBufferSize
+ // is second buffer.
+ Error = NvRmMemMap(hRmMemory, 0, OneBufferSize,
+ NVOS_MEM_READ_WRITE, pBuffPtr1);
+ if (!Error)
+ {
+ Error = NvRmMemMap(hRmMemory, OneBufferSize, OneBufferSize,
+ NVOS_MEM_READ_WRITE, pBuffPtr2);
+ if (Error)
+ NvRmMemUnmap(hRmMemory, pBuffPtr1, OneBufferSize);
+ }
+ // If error then free the allocation and reset all changed value.
+ if (Error)
+ {
+ DestroyDmaBufferMemoryHandle(hRmMemory);
+ hRmMemory = NULL;
+ *pBuffPtr1 = (void *)NULL;
+ *pBuffPtr2 = (void *)NULL;
+ return Error;
+ }
+ *phRmMemory = hRmMemory;
+ *pBuffPhysAddr1 = BuffPhysAddr;
+ *pBuffPhysAddr2 = BuffPhysAddr + OneBufferSize;
+ }
+ return Error;
+}
+
+/**
+ * Destroy the dma transfer buffers (unmap both halves and free the handle).
+ * Thread safety: Caller responsibility.
+ */
+static void
+DestroyDmaTransferBuffer(
+ NvRmMemHandle hRmMemory,
+ void *pBuffPtr1,
+ void *pBuffPtr2,
+ NvU32 OneBufferSize)
+{
+ if (hRmMemory)
+ {
+ if (pBuffPtr1)
+ NvRmMemUnmap(hRmMemory, pBuffPtr1, OneBufferSize);
+ if (pBuffPtr2)
+ NvRmMemUnmap(hRmMemory, pBuffPtr2, OneBufferSize);
+ DestroyDmaBufferMemoryHandle(hRmMemory);
+ }
+}
+
+/**
+ * Handle a transfer-complete event: latch the rx/tx status, clear the hw
+ * status, and for cpu based transfers drain the rx fifo and refill the tx
+ * fifo, starting the next chunk when packets remain.
+ * Returns NV_TRUE when the whole requested transfer is done (or errored out).
+ */
+static NvBool HandleTransferCompletion(NvRmSpiHandle hRmSpiSlink)
+{
+ NvU32 WordsReq;
+ NvU32 WordsRead;
+ NvU32 CurrPacketSize;
+ NvU32 WordsWritten;
+ HwInterfaceHandle hHwInt = hRmSpiSlink->hHwInterface;
+
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Tx)
+ hRmSpiSlink->TxTransferStatus =
+ hHwInt->HwGetTransferStatusFxn(&hRmSpiSlink->HwRegs, SerialHwDataFlow_Tx);
+
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx)
+ hRmSpiSlink->RxTransferStatus =
+ hHwInt->HwGetTransferStatusFxn(&hRmSpiSlink->HwRegs, SerialHwDataFlow_Rx);
+
+ hHwInt->HwClearTransferStatusFxn(&hRmSpiSlink->HwRegs, hRmSpiSlink->CurrentDirection);
+
+ // On any error, stop the transfer and return.
+ if (hRmSpiSlink->RxTransferStatus || hRmSpiSlink->TxTransferStatus)
+ {
+ hHwInt->HwSetDataFlowFxn(&hRmSpiSlink->HwRegs, hRmSpiSlink->CurrentDirection, NV_FALSE);
+ hHwInt->HwResetFifoFxn(&hRmSpiSlink->HwRegs, SerialHwFifo_Both);
+ hRmSpiSlink->CurrTransInfo.PacketTransferred +=
+ hHwInt->HwGetTransferdCountFxn(&hRmSpiSlink->HwRegs);
+ hRmSpiSlink->CurrentDirection = SerialHwDataFlow_None;
+ return NV_TRUE;
+ }
+
+ // If dma based transfer then this completion is the transfer completion.
+ if (hRmSpiSlink->IsUsingApbDma)
+ return NV_TRUE;
+
+ if ((hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx) &&
+ (hRmSpiSlink->CurrTransInfo.RxPacketsRemaining))
+ {
+ WordsReq = ((hRmSpiSlink->CurrTransInfo.CurrPacketCount) +
+ ((hRmSpiSlink->CurrTransInfo.PacketsPerWord) -1))/
+ (hRmSpiSlink->CurrTransInfo.PacketsPerWord);
+
+ WordsRead = hHwInt->HwReadFromReceiveFifoFxn(&hRmSpiSlink->HwRegs,
+ hRmSpiSlink->CurrTransInfo.pRxBuff, WordsReq);
+ hRmSpiSlink->CurrTransInfo.RxPacketsRemaining -=
+ hRmSpiSlink->CurrTransInfo.CurrPacketCount;
+ hRmSpiSlink->CurrTransInfo.PacketTransferred +=
+ hRmSpiSlink->CurrTransInfo.CurrPacketCount;
+ hRmSpiSlink->CurrTransInfo.pRxBuff += WordsRead;
+ }
+
+ if ((hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Tx) &&
+ (hRmSpiSlink->CurrTransInfo.TxPacketsRemaining))
+ {
+ WordsReq = (hRmSpiSlink->CurrTransInfo.TxPacketsRemaining +
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord -1)/
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord;
+
+ WordsWritten = hHwInt->HwWriteInTransmitFifoFxn(
+ &hRmSpiSlink->HwRegs,
+ hRmSpiSlink->CurrTransInfo.pTxBuff, WordsReq);
+ CurrPacketSize = NV_MIN(hRmSpiSlink->CurrTransInfo.PacketsPerWord * WordsWritten,
+ hRmSpiSlink->CurrTransInfo.TxPacketsRemaining);
+ hHwInt->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs, CurrPacketSize);
+ hHwInt->HwStartTransferFxn(&hRmSpiSlink->HwRegs,
+ NV_FALSE);
+ hRmSpiSlink->CurrTransInfo.CurrPacketCount = CurrPacketSize;
+ hRmSpiSlink->CurrTransInfo.TxPacketsRemaining -= CurrPacketSize;
+ hRmSpiSlink->CurrTransInfo.PacketTransferred += CurrPacketSize;
+ hRmSpiSlink->CurrTransInfo.pTxBuff += WordsWritten;
+ return NV_FALSE;
+ }
+
+ // If still need to do the transfer for receiving the data then start now.
+ if ((hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx) &&
+ (hRmSpiSlink->CurrTransInfo.RxPacketsRemaining))
+ {
+ CurrPacketSize = NV_MIN(hRmSpiSlink->CurrTransInfo.RxPacketsRemaining,
+ (hRmSpiSlink->HwRegs.MaxWordTransfer*
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord));
+ hRmSpiSlink->CurrTransInfo.CurrPacketCount = CurrPacketSize;
+ hHwInt->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs, CurrPacketSize);
+ hHwInt->HwStartTransferFxn(&hRmSpiSlink->HwRegs, NV_FALSE);
+ return NV_FALSE;
+ }
+
+ // All requested transfer is completed.
+ return NV_TRUE;
+}
+
+/**
+ * Isr of the spi/slink controller: handle the completion work and, when the
+ * whole transfer is done, signal the client waiting on the sync semaphore.
+ */
+static void SpiSlinkIsr(void *args)
+{
+ NvRmSpiHandle hRmSpiSlink = args;
+ NvBool IsTransferCompleted;
+
+ IsTransferCompleted = HandleTransferCompletion(hRmSpiSlink);
+ if (IsTransferCompleted)
+ NvOsSemaphoreSignal(hRmSpiSlink->hSynchSema);
+ NvRmInterruptDone(hRmSpiSlink->SpiInterruptHandle);
+}
+
+
+/**
+ * Wait (by polling or on the sync semaphore) until the current transfer
+ * completes or WaitTimeOutMS expires. On timeout: abort the dma, drain any
+ * residual rx fifo data through the cpu, and reset the controller.
+ */
+static NvError
+WaitForTransferCompletion(
+ NvRmSpiHandle hRmSpiSlink,
+ NvU32 WaitTimeOutMS,
+ NvBool IsPoll)
+{
+ NvBool IsReady;
+ NvBool IsTransferComplete= NV_FALSE;
+ NvU32 StartTime;
+ NvU32 CurrentTime;
+ NvU32 TimeElapsed;
+ NvBool IsWait = NV_TRUE;
+ NvError Error = NvSuccess;
+ NvU32 DmaRxTransferCountBytes = 0;
+ NvU32 PacketTransferedFromFifoYet = 0;
+ NvU32 CurrentSlinkPacketTransfer;
+ NvU32 PacketsInRxFifo;
+ NvU32 WordsAvailbleInFifo;
+ NvU32 WordsRead;
+ NvU32 *pUpdatedRxBuffer = NULL;
+#if NV_OAL
+ // For oal version, we only use the polling method.
+ IsPoll = NV_TRUE;
+#endif
+
+ if (IsPoll)
+ {
+ StartTime = NvOsGetTimeMS();
+ while (IsWait)
+ {
+ IsReady = hRmSpiSlink->hHwInterface->HwIsTransferCompletedFxn(&hRmSpiSlink->HwRegs);
+ if (IsReady)
+ {
+ IsTransferComplete = HandleTransferCompletion(hRmSpiSlink);
+ if(IsTransferComplete)
+ break;
+ }
+ if (WaitTimeOutMS != NV_WAIT_INFINITE)
+ {
+ CurrentTime = NvOsGetTimeMS();
+ TimeElapsed = (CurrentTime >= StartTime)? (CurrentTime - StartTime):
+ MAX_TIME_IN_MS - StartTime + CurrentTime;
+ IsWait = (TimeElapsed > WaitTimeOutMS)? NV_FALSE: NV_TRUE;
+ }
+ }
+
+ Error = (IsTransferComplete)? NvError_Success: NvError_Timeout;
+#if NV_OAL
+ // If no error and apb dma based transfer then stop the dma transfer to
+ // make the dma state machine non busy.
+ if ((!Error) && (hRmSpiSlink->IsUsingApbDma))
+ {
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx)
+ NvRmDmaAbort(hRmSpiSlink->hRmRxDma);
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Tx)
+ NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+ }
+#endif
+ }
+ else
+ {
+ Error = NvOsSemaphoreWaitTimeout(hRmSpiSlink->hSynchSema, WaitTimeOutMS);
+ }
+
+ // If timeout happens then stop all transfer and exit.
+ if (Error == NvError_Timeout)
+ {
+ // Disable the data flow first.
+ hRmSpiSlink->hHwInterface->HwSetDataFlowFxn(&hRmSpiSlink->HwRegs,
+ hRmSpiSlink->CurrentDirection, NV_FALSE);
+
+ // Get the transfer count now.
+ if (hRmSpiSlink->IsUsingApbDma)
+ {
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx)
+ {
+ // Get the Rx transfer count transferred by Dma.
+ Error = NvRmDmaGetTransferredCount(hRmSpiSlink->hRmRxDma,
+ &DmaRxTransferCountBytes, NV_TRUE);
+ NV_ASSERT(Error == NvSuccess);
+ if (Error != NvSuccess)
+ DmaRxTransferCountBytes = 0;
+ PacketTransferedFromFifoYet = (DmaRxTransferCountBytes >> 2) *
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord;
+ pUpdatedRxBuffer = hRmSpiSlink->pRxDmaBuffer + (DmaRxTransferCountBytes >> 2);
+ NvRmDmaAbort(hRmSpiSlink->hRmRxDma);
+ }
+
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Tx)
+ NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+ }
+ else
+ {
+ PacketTransferedFromFifoYet = hRmSpiSlink->CurrTransInfo.PacketTransferred;
+ pUpdatedRxBuffer = hRmSpiSlink->CurrTransInfo.pRxBuff;
+ }
+
+ // Check again whether the transfer is completed or not.
+ // It may be possible that transfer is completed when we reach here.
+ // If transfer is completed then we may read 0 from the status
+ // register
+ IsReady = hRmSpiSlink->hHwInterface->HwIsTransferCompletedFxn(&hRmSpiSlink->HwRegs);
+ if (IsReady)
+ {
+ // All requested transfer has been done.
+ CurrentSlinkPacketTransfer = hRmSpiSlink->CurrTransInfo.CurrPacketCount;
+ Error = NvSuccess;
+ }
+ else
+ {
+ // Get the transfer count from status register.
+ CurrentSlinkPacketTransfer =
+ hRmSpiSlink->hHwInterface->HwGetTransferdCountFxn(&hRmSpiSlink->HwRegs);
+
+ // If it is in packed mode and the number of received packets is non word
+ // aligned then ignore the packets which are not able to make a full word.
+ // This is because we can not read such packets from the fifo as they are
+ // not available in the fifo. -- Hw issue
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx)
+ {
+ if (hRmSpiSlink->CurrTransInfo.PacketsPerWord > 1)
+ CurrentSlinkPacketTransfer -=
+ CurrentSlinkPacketTransfer%
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord;
+ }
+
+ }
+ hRmSpiSlink->CurrTransInfo.PacketTransferred += CurrentSlinkPacketTransfer;
+
+ // Disable the interrupt.
+ if (!IsPoll)
+ hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+ hRmSpiSlink->CurrentDirection, NV_FALSE);
+
+ // For Rx: Dma will always transfer equal to or less than slink has
+ // transferred. If slink has transferred more data and dma has
+ // not transferred it from the fifo to memory then there may be some more
+ // data available in the fifo. Reading those from cpu.
+ // For Tx: The dma will transfer more than slink has and non transferred
+ // data will be in the fifo which will get reset after slink reset. No need
+ // to do any more for tx case.
+ if (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx)
+ {
+ // If the slink transferred word count is more than the dma transfer count
+ // then some more data is available in the fifo. Read them
+ // through CPU.
+ if (PacketTransferedFromFifoYet < CurrentSlinkPacketTransfer)
+ {
+ PacketsInRxFifo = CurrentSlinkPacketTransfer - PacketTransferedFromFifoYet;
+ WordsAvailbleInFifo =
+ (PacketsInRxFifo + hRmSpiSlink->CurrTransInfo.PacketsPerWord -1)/
+ hRmSpiSlink->CurrTransInfo.PacketsPerWord;
+ WordsRead = hRmSpiSlink->hHwInterface->HwReadFromReceiveFifoFxn(
+ &hRmSpiSlink->HwRegs, pUpdatedRxBuffer, WordsAvailbleInFifo);
+
+ // Expecting the WordsRead should be equal to WordsAvailbleInFifo
+ if (WordsRead != WordsAvailbleInFifo)
+ {
+ NV_ASSERT(WordsRead == WordsAvailbleInFifo);
+ }
+ }
+ }
+
+
+ // The busy bit will still show the busy status so need to reset the
+ // controller. .. Hw Bug
+ NvRmModuleReset(hRmSpiSlink->hDevice,
+ NVRM_MODULE_ID(hRmSpiSlink->RmModuleId, hRmSpiSlink->InstanceId));
+ hRmSpiSlink->CurrentDirection = SerialHwDataFlow_None;
+ }
+ return Error;
+}
+
+#if NV_OAL
+/** Poll (without timeout) until the current master-mode transfer completes. */
+static void OalMasterSpiSlinkPoll(NvRmSpiHandle hRmSpiSlink)
+{
+ NvBool IsReady;
+ NvBool TransferComplete = NV_FALSE;
+ // Check for transfer completion in an infinite loop.
+ while (1)
+ {
+ IsReady = hRmSpiSlink->hHwInterface->HwIsTransferCompletedFxn(&hRmSpiSlink->HwRegs);
+ if (IsReady)
+ {
+ TransferComplete = HandleTransferCompletion(hRmSpiSlink);
+ if(TransferComplete)
+ break;
+ }
+ }
+}
+#endif
+
+/**
+ * Register the spi interrupt (no-op if already registered).
+ * Thread safety: Caller responsibility.
+ */
+static NvError
+RegisterSpiSlinkInterrupt(
+ NvRmDeviceHandle hDevice,
+ NvRmSpiHandle hRmSpiSlink,
+ NvU32 InstanceId)
+{
+ NvU32 IrqList;
+ NvOsInterruptHandler hIntHandlers;
+ if (hRmSpiSlink->SpiInterruptHandle)
+ return NvSuccess;
+
+ IrqList = NvRmGetIrqForLogicalInterrupt(
+ hDevice, NVRM_MODULE_ID(hRmSpiSlink->RmModuleId, InstanceId), 0);
+ hIntHandlers = SpiSlinkIsr;
+ return(NvRmInterruptRegister(hDevice, 1, &IrqList,
+ &hIntHandlers, hRmSpiSlink, &hRmSpiSlink->SpiInterruptHandle, NV_TRUE));
+}
+
+/**
+ * Enable or disable power, clock and dfs busy hints for the spi/slink module.
+ * IsEnable NV_TRUE raises the busy hints and turns on voltage and clock;
+ * NV_FALSE does the reverse (clock off, hints dropped to 0, voltage off).
+ */
+static NvError SetPowerControl(NvRmSpiHandle hRmSpiSlink, NvBool IsEnable)
+{
+ NvError Error = NvSuccess;
+ NvRmModuleID ModuleId;
+
+ ModuleId = NVRM_MODULE_ID(hRmSpiSlink->RmModuleId, hRmSpiSlink->InstanceId);
+ if (IsEnable)
+ {
+ hRmSpiSlink->BusyHints[0].BoostKHz = 80000; // Emc
+ hRmSpiSlink->BusyHints[1].BoostKHz = 80000; // Ahb
+ hRmSpiSlink->BusyHints[2].BoostKHz = 80000; // Apb
+ hRmSpiSlink->BusyHints[3].BoostKHz = 240000; // Cpu
+ NvRmPowerBusyHintMulti(hRmSpiSlink->hDevice, hRmSpiSlink->RmPowerClientId,
+ hRmSpiSlink->BusyHints, 4,
+ NvRmDfsBusyHintSyncMode_Async);
+
+ // Enable power for spi/slink module
+ Error = NvRmPowerVoltageControl(hRmSpiSlink->hDevice, ModuleId,
+ hRmSpiSlink->RmPowerClientId,
+ NvRmVoltsUnspecified, NvRmVoltsUnspecified,
+ NULL, 0, NULL);
+ // Enable the clock.
+ if (!Error)
+ Error = NvRmPowerModuleClockControl(hRmSpiSlink->hDevice, ModuleId,
+ hRmSpiSlink->RmPowerClientId, NV_TRUE);
+ }
+ else
+ {
+ // Disable the clocks.
+ (void)NvRmPowerModuleClockControl(hRmSpiSlink->hDevice, ModuleId,
+ hRmSpiSlink->RmPowerClientId, NV_FALSE);
+ hRmSpiSlink->BusyHints[0].BoostKHz = 0; // Emc
+ hRmSpiSlink->BusyHints[1].BoostKHz = 0; // Ahb
+ hRmSpiSlink->BusyHints[2].BoostKHz = 0; // Apb
+ hRmSpiSlink->BusyHints[3].BoostKHz = 0; // Cpu
+
+ NvRmPowerBusyHintMulti(hRmSpiSlink->hDevice, hRmSpiSlink->RmPowerClientId,
+ hRmSpiSlink->BusyHints, 4,
+ NvRmDfsBusyHintSyncMode_Async);
+
+ // Disable the power to the controller.
+ (void)NvRmPowerVoltageControl(hRmSpiSlink->hDevice, ModuleId,
+ hRmSpiSlink->RmPowerClientId,
+ NvRmVoltsOff, NvRmVoltsOff,
+ NULL, 0, NULL);
+ }
+ return Error;
+}
+
+/**
+ * Destroy the handle of the spi channel and free all the allocations done for it.
+ * Thread safety: Caller responsibility.
+ */
+static void DestroySpiSlinkChannelHandle(NvRmSpiHandle hRmSpiSlink)
+{
+ NvU32 HandleStartIndex;
+#if !NV_OAL
+ NvRmInterruptUnregister(hRmSpiSlink->hDevice, hRmSpiSlink->SpiInterruptHandle);
+ hRmSpiSlink->SpiInterruptHandle = NULL;
+#endif
+
+
+ // Unmap the virtual mapping of the spi hw register.
+ NvRmPhysicalMemUnmap(hRmSpiSlink->HwRegs.pRegsBaseAdd, hRmSpiSlink->HwRegs.RegBankSize);
+
+ // the clocks should already be disabled for Non-oal. don't disable it here for non-oal
+ // For oal disable here.
+#if NV_OAL
+ (void)SetPowerControl(hRmSpiSlink, NV_FALSE);
+#endif
+
+#if !NV_OAL
+ // Unregister for the power manager.
+ NvRmPowerUnRegister(hRmSpiSlink->hDevice, hRmSpiSlink->RmPowerClientId);
+#endif
+
+ // Tri-State the pin-mux pins
+ NV_ASSERT_SUCCESS(NvRmSetModuleTristate(hRmSpiSlink->hDevice,
+ NVRM_MODULE_ID(hRmSpiSlink->RmModuleId,hRmSpiSlink->InstanceId), NV_TRUE));
+
+ NvOsFree(hRmSpiSlink->pTxCpuBuffer);
+ NvOsFree(hRmSpiSlink->pRxCpuBuffer);
+
+ if (hRmSpiSlink->hRmRxDma)
+ {
+ NvRmDmaAbort(hRmSpiSlink->hRmRxDma);
+ NvRmDmaFree(hRmSpiSlink->hRmRxDma);
+ }
+
+ if (hRmSpiSlink->hRmTxDma)
+ {
+ NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+ NvRmDmaFree(hRmSpiSlink->hRmTxDma);
+ }
+
+ DestroyDmaTransferBuffer(hRmSpiSlink->hRmMemory, hRmSpiSlink->pRxDmaBuffer,
+ hRmSpiSlink->pTxDmaBuffer, hRmSpiSlink->DmaBufferSize);
+
+#if !NV_OAL
+ // Destroy the mutex allocated for the channel access.
+ NvOsMutexDestroy(hRmSpiSlink->hChannelAccessMutex);
+
+ // Destroy the sync semaphores.
+ NvOsSemaphoreDestroy(hRmSpiSlink->hSynchSema);
+#endif
+
+ HandleStartIndex = (hRmSpiSlink->IsSpiChannel)? 0: MAX_SPI_CONTROLLERS;
+ s_SpiSlinkInfo.hSpiSlinkChannelList[HandleStartIndex + hRmSpiSlink->InstanceId] = NULL;
+
+ // Free the memory of the spi handles.
+ NvOsFree(hRmSpiSlink);
+}
+
+
+/**
+ * Create the handle for the spi channel.
+ * Thread safety: Caller responsibity.
+ */
+static NvError CreateSpiSlinkChannelHandle(
+ NvRmDeviceHandle hDevice,
+ NvBool IsSpiChannel,
+ NvU32 InstanceId,
+ NvBool IsMasterMode,
+ NvRmSpiHandle *phSpiSlinkChannel)
+{
+ NvError Error = NvSuccess;
+ NvRmSpiHandle hRmSpiSlink = NULL;
+ NvRmModuleID ModuleId;
+ NvU32 ChipSelIndex;
+ NvU32 InstIndexOffset = (IsSpiChannel)? 0: MAX_SPI_CONTROLLERS;
+ const NvU32 *pOdmConfigs;
+ NvU32 NumOdmConfigs;
+ NvU32 CpuBufferSize;
+ NvRmDmaModuleID DmaModuleId;
+ const NvOdmQuerySpiIdleSignalState *pSignalState = NULL;
+
+ *phSpiSlinkChannel = NULL;
+
+ // Allcoate the memory for the spi handle.
+ hRmSpiSlink = NvOsAlloc(sizeof(NvRmSpi));
+ if (!hRmSpiSlink)
+ return NvError_InsufficientMemory;
+
+ NvOsMemset(hRmSpiSlink, 0, sizeof(NvRmSpi));
+
+ // Set the spi handle parameters.
+ hRmSpiSlink->hDevice = hDevice;
+ hRmSpiSlink->InstanceId = InstanceId;
+ hRmSpiSlink->IsSpiChannel = IsSpiChannel;
+ hRmSpiSlink->IsMasterMode = IsMasterMode;
+ hRmSpiSlink->RmModuleId = (IsSpiChannel)?NvRmModuleID_Spi: NvRmModuleID_Slink;
+ hRmSpiSlink->RmIoModuleId = (IsSpiChannel)?NvOdmIoModule_Sflash: NvOdmIoModule_Spi;
+ hRmSpiSlink->OpenCount = 1;
+ hRmSpiSlink->IsApbDmaAllocated = NV_FALSE;
+ hRmSpiSlink->TransCountFromLastDmaUsage = 0;
+ hRmSpiSlink->hRmRxDma = NULL;
+ hRmSpiSlink->hRmMemory = NULL;
+ hRmSpiSlink->hRmTxDma = NULL;
+ hRmSpiSlink->DmaRxBuffPhysAdd = 0;
+ hRmSpiSlink->DmaTxBuffPhysAdd = 0;
+ hRmSpiSlink->pRxDmaBuffer = NULL;
+ hRmSpiSlink->pTxDmaBuffer = NULL;
+ hRmSpiSlink->pTxCpuBuffer = NULL;
+ hRmSpiSlink->pRxCpuBuffer = NULL;
+ hRmSpiSlink->CpuBufferSizeInWords = 0;
+ hRmSpiSlink->hHwInterface = NULL;
+ hRmSpiSlink->RmPowerClientId = 0;
+ hRmSpiSlink->SpiPinMap = 0;
+
+ // Initialize the frequncy requirements array
+ hRmSpiSlink->BusyHints[0].ClockId = NvRmDfsClockId_Emc;
+ hRmSpiSlink->BusyHints[0].BoostDurationMs = NV_WAIT_INFINITE;
+ hRmSpiSlink->BusyHints[0].BusyAttribute = NV_TRUE;
+
+ hRmSpiSlink->BusyHints[1].ClockId = NvRmDfsClockId_Ahb;
+ hRmSpiSlink->BusyHints[1].BoostDurationMs = NV_WAIT_INFINITE;
+ hRmSpiSlink->BusyHints[1].BusyAttribute = NV_TRUE;
+
+ hRmSpiSlink->BusyHints[2].ClockId = NvRmDfsClockId_Apb;
+ hRmSpiSlink->BusyHints[2].BoostDurationMs = NV_WAIT_INFINITE;
+ hRmSpiSlink->BusyHints[2].BusyAttribute = NV_TRUE;
+
+ hRmSpiSlink->BusyHints[3].ClockId = NvRmDfsClockId_Cpu;
+ hRmSpiSlink->BusyHints[3].BoostDurationMs = NV_WAIT_INFINITE;
+ hRmSpiSlink->BusyHints[3].BusyAttribute = NV_TRUE;
+
+ ModuleId = NVRM_MODULE_ID(hRmSpiSlink->RmModuleId, InstanceId);
+
+ if (IsSpiChannel)
+ hRmSpiSlink->hHwInterface = &s_SpiHwInterface;
+ else
+ hRmSpiSlink->hHwInterface = &s_SlinkHwInterface;
+
+ for (ChipSelIndex = 0; ChipSelIndex < MAX_CHIPSELECT_PER_INSTANCE; ++ChipSelIndex)
+ hRmSpiSlink->IsChipSelSupported[ChipSelIndex] =
+ SpiSlinkGetDeviceInfo(IsSpiChannel, InstanceId, ChipSelIndex,
+ &hRmSpiSlink->DeviceInfo[ChipSelIndex]);
+ // Get the odm pin map
+ NvOdmQueryPinMux(hRmSpiSlink->RmIoModuleId, &pOdmConfigs, &NumOdmConfigs);
+ NV_ASSERT((InstanceId < NumOdmConfigs) && (pOdmConfigs[InstanceId]));
+ hRmSpiSlink->SpiPinMap = pOdmConfigs[InstanceId];
+
+ pSignalState = NvOdmQuerySpiGetIdleSignalState(hRmSpiSlink->RmIoModuleId, InstanceId);
+ if (pSignalState)
+ {
+ hRmSpiSlink->IsIdleSignalTristate = pSignalState->IsTristate;
+ hRmSpiSlink->HwRegs.IdleSignalMode = pSignalState->SignalMode;
+ hRmSpiSlink->HwRegs.IsIdleDataOutHigh = pSignalState->IsIdleDataOutHigh;
+ }
+ else
+ {
+ hRmSpiSlink->IsIdleSignalTristate = NV_FALSE;
+ hRmSpiSlink->HwRegs.IdleSignalMode = NvOdmQuerySpiSignalMode_0;
+ hRmSpiSlink->HwRegs.IsIdleDataOutHigh = NV_FALSE;
+ }
+ Error = NvRmSetModuleTristate(hRmSpiSlink->hDevice, ModuleId,
+ hRmSpiSlink->IsIdleSignalTristate);
+ if (Error)
+ {
+ // If error then return from here.
+ NvOsFree(hRmSpiSlink);
+ return Error;
+ }
+
+ hRmSpiSlink->RxTransferStatus = NvSuccess;
+ hRmSpiSlink->TxTransferStatus = NvSuccess;
+
+ hRmSpiSlink->hHwInterface->HwRegisterInitializeFxn(InstanceId, &hRmSpiSlink->HwRegs);
+
+#if !NV_OAL
+ // Create the mutex for channel access.
+ if (!Error)
+ Error = NvOsMutexCreate(&hRmSpiSlink->hChannelAccessMutex);
+
+ // Create the synchronous semaphores.
+ if (!Error)
+ Error = NvOsSemaphoreCreate(&hRmSpiSlink->hSynchSema, 0);
+#endif
+
+ // Get the spi hw physical base address and map in virtual memory space.
+ if (!Error)
+ {
+ NvRmPhysAddr SpiSlinkPhysAddr;
+ NvRmModuleGetBaseAddress(hDevice, ModuleId,
+ &SpiSlinkPhysAddr, &hRmSpiSlink->HwRegs.RegBankSize);
+
+ hRmSpiSlink->HwRegs.HwRxFifoAdd += SpiSlinkPhysAddr;
+ hRmSpiSlink->HwRegs.HwTxFifoAdd += SpiSlinkPhysAddr;
+ Error = NvRmPhysicalMemMap(SpiSlinkPhysAddr,
+ hRmSpiSlink->HwRegs.RegBankSize, NVOS_MEM_READ_WRITE,
+ NvOsMemAttribute_Uncached,
+ (void **)&hRmSpiSlink->HwRegs.pRegsBaseAdd);
+ }
+
+ // Allocate the dma buffer and the dma channel
+ if (!Error)
+ {
+ hRmSpiSlink->IsApbDmaAllocated = NV_TRUE;
+
+ // Don't go to the dma allocation if the oal and master mode.
+ // It creates the download issue using the spi kitl if dma mode is used.
+#if NV_OAL
+ if (IsMasterMode)
+ {
+ Error = NvError_NotSupported;
+ }
+ else
+ {
+ Error = CreateDmaTransferBuffer(hRmSpiSlink->hDevice, &hRmSpiSlink->hRmMemory,
+ &hRmSpiSlink->DmaRxBuffPhysAdd, (void **)&hRmSpiSlink->pRxDmaBuffer,
+ &hRmSpiSlink->DmaTxBuffPhysAdd, (void **)&hRmSpiSlink->pTxDmaBuffer,
+ DEFAULT_DMA_BUFFER_SIZE);
+ }
+#else
+ Error = CreateDmaTransferBuffer(hRmSpiSlink->hDevice, &hRmSpiSlink->hRmMemory,
+ &hRmSpiSlink->DmaRxBuffPhysAdd, (void **)&hRmSpiSlink->pRxDmaBuffer,
+ &hRmSpiSlink->DmaTxBuffPhysAdd, (void **)&hRmSpiSlink->pTxDmaBuffer,
+ DEFAULT_DMA_BUFFER_SIZE);
+#endif
+ if (!Error)
+ {
+ hRmSpiSlink->DmaBufferSize = DEFAULT_DMA_BUFFER_SIZE;
+ DmaModuleId = (IsSpiChannel)?NvRmDmaModuleID_Spi: NvRmDmaModuleID_Slink;
+
+ // Allocate the dma (for Rx and for Tx) with high priority
+ // Allocate dma now only for the slave mode handle.
+ // For master mode, it will be allaocated based on the transaction size
+ // to make it adaptive.
+ if (!IsMasterMode)
+ {
+ Error = NvRmDmaAllocate(hRmSpiSlink->hDevice, &hRmSpiSlink->hRmRxDma,
+ NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+ hRmSpiSlink->InstanceId);
+ if (!Error)
+ {
+ Error = NvRmDmaAllocate(hRmSpiSlink->hDevice, &hRmSpiSlink->hRmTxDma,
+ NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+ hRmSpiSlink->InstanceId);
+ if (Error)
+ NvRmDmaFree(hRmSpiSlink->hRmRxDma);
+ }
+ if (Error)
+ {
+ DestroyDmaTransferBuffer(hRmSpiSlink->hRmMemory,
+ hRmSpiSlink->pRxDmaBuffer, hRmSpiSlink->pTxDmaBuffer,
+ hRmSpiSlink->DmaBufferSize);
+ }
+ }
+ else
+ {
+ hRmSpiSlink->IsApbDmaAllocated = NV_FALSE;
+ hRmSpiSlink->hRmRxDma = NULL;
+ hRmSpiSlink->hRmTxDma = NULL;
+ }
+ }
+ if (Error)
+ {
+ hRmSpiSlink->IsApbDmaAllocated = NV_FALSE;
+ hRmSpiSlink->hRmRxDma = NULL;
+ hRmSpiSlink->hRmMemory = NULL;
+ hRmSpiSlink->hRmTxDma = NULL;
+ hRmSpiSlink->DmaRxBuffPhysAdd = 0;
+ hRmSpiSlink->DmaTxBuffPhysAdd = 0;
+ hRmSpiSlink->pRxDmaBuffer = NULL;
+ hRmSpiSlink->pTxDmaBuffer = NULL;
+ Error = NvSuccess;
+ }
+ else
+ {
+ hRmSpiSlink->RxDmaReq.SourceBufferPhyAddress= hRmSpiSlink->HwRegs.HwRxFifoAdd;
+ hRmSpiSlink->RxDmaReq.DestinationBufferPhyAddress = hRmSpiSlink->DmaRxBuffPhysAdd;
+ hRmSpiSlink->RxDmaReq.SourceAddressWrapSize = 4;
+ hRmSpiSlink->RxDmaReq.DestinationAddressWrapSize = 0;
+
+ hRmSpiSlink->TxDmaReq.SourceBufferPhyAddress= hRmSpiSlink->DmaTxBuffPhysAdd;
+ hRmSpiSlink->TxDmaReq.DestinationBufferPhyAddress = hRmSpiSlink->HwRegs.HwTxFifoAdd;
+ hRmSpiSlink->TxDmaReq.SourceAddressWrapSize = 0;
+ hRmSpiSlink->TxDmaReq.DestinationAddressWrapSize = 4;
+ }
+ }
+
+ if (!Error)
+ {
+ // If dma is allocated then allocate the less size of the cpu buffer
+ // otherwise allocate bigger size to get the optimized timing execution.
+ CpuBufferSize = (hRmSpiSlink->IsApbDmaAllocated)?
+ (MAX_CPU_TRANSACTION_SIZE_WORD << 2): DEFAULT_DMA_BUFFER_SIZE;
+
+ hRmSpiSlink->pRxCpuBuffer = NvOsAlloc(CpuBufferSize);
+ if (!hRmSpiSlink->pRxCpuBuffer)
+ Error = NvError_InsufficientMemory;
+
+ if (!Error)
+ {
+ hRmSpiSlink->pTxCpuBuffer = NvOsAlloc(CpuBufferSize);
+ if (!hRmSpiSlink->pTxCpuBuffer)
+ Error = NvError_InsufficientMemory;
+ }
+ if (!Error)
+ hRmSpiSlink->CpuBufferSizeInWords = CpuBufferSize >> 2;
+ }
+
+#if !NV_OAL
+ // Register slink/spi for Rm power client
+ if (!Error)
+ Error = NvRmPowerRegister(hRmSpiSlink->hDevice, NULL, &hRmSpiSlink->RmPowerClientId);
+#endif
+
+ // Enable Power/Clock.
+ if (!Error)
+ Error = SetPowerControl(hRmSpiSlink, NV_TRUE);
+
+ // Reset the module.
+ if (!Error)
+ NvRmModuleReset(hDevice, ModuleId);
+
+#if !NV_OAL
+ // Register the interrupt.
+ if (!Error)
+ Error = RegisterSpiSlinkInterrupt(hDevice, hRmSpiSlink, InstanceId);
+#endif
+
+ // Initialize the controller register.
+ if (!Error)
+ {
+ // Set the default signal mode of the spi channel.
+ hRmSpiSlink->hHwInterface->HwSetSignalModeFxn(&hRmSpiSlink->HwRegs, hRmSpiSlink->HwRegs.IdleSignalMode);
+
+ // Set chip select to non active state.
+ hRmSpiSlink->hHwInterface->HwControllerInitializeFxn(&hRmSpiSlink->HwRegs);
+ for (ChipSelIndex = 0; ChipSelIndex < MAX_CHIPSELECT_PER_INSTANCE; ++ChipSelIndex)
+ {
+ hRmSpiSlink->IsCurrentChipSelStateHigh[ChipSelIndex] = NV_TRUE;
+ if (hRmSpiSlink->IsChipSelSupported[ChipSelIndex])
+ {
+ hRmSpiSlink->IsCurrentChipSelStateHigh[ChipSelIndex] =
+ hRmSpiSlink->DeviceInfo[ChipSelIndex].ChipSelectActiveLow;
+ hRmSpiSlink->hHwInterface->HwSetChipSelectDefaultLevelFxn(
+ &hRmSpiSlink->HwRegs, ChipSelIndex,
+ hRmSpiSlink->IsCurrentChipSelStateHigh[ChipSelIndex]);
+ }
+ }
+ // Let chipselect to be stable for 1 ms before doing any transaction.
+ NvOsWaitUS(1000);
+#if !NV_OAL
+ // switch off clock and power to the slink module by default.
+ Error = SetPowerControl(hRmSpiSlink, NV_FALSE);
+#endif
+ }
+
+ // If error then destroy all the allocation done here.
+ if (Error)
+ {
+ DestroySpiSlinkChannelHandle(hRmSpiSlink);
+ hRmSpiSlink = NULL;
+ }
+
+ *phSpiSlinkChannel = hRmSpiSlink;
+ s_SpiSlinkInfo.hSpiSlinkChannelList[InstanceId + InstIndexOffset] = hRmSpiSlink;
+ return Error;
+}
+
+/**
+ * Set the chip select signal level to be active or inactive.
+ */
+static NvError
+SetChipSelectSignalLevel(
+ NvRmSpiHandle hRmSpiSlink,
+ NvU32 ChipSelectId,
+ NvU32 ClockSpeedInKHz,
+ NvBool IsActive)
+{
+ NvError Error = NvSuccess;
+ NvBool IsHigh;
+ NvRmModuleID ModuleId;
+ NvU32 PrefClockFreqInKHz;
+ NvU32 ConfiguredClockFreqInKHz = 0;
+ NvOdmQuerySpiDeviceInfo *pDevInfo = &hRmSpiSlink->DeviceInfo[ChipSelectId];
+ HwInterfaceHandle hHwIntf = hRmSpiSlink->hHwInterface;
+ if (IsActive)
+ {
+ if (ClockSpeedInKHz != hRmSpiSlink->ClockFreqInKHz)
+ {
+ ModuleId = NVRM_MODULE_ID(hRmSpiSlink->RmModuleId, hRmSpiSlink->InstanceId);
+
+ // The slink clock source should be 4 times of the interface clock speed
+ PrefClockFreqInKHz = (hRmSpiSlink->RmModuleId == NvRmModuleID_Slink)?
+ (ClockSpeedInKHz << 2): (ClockSpeedInKHz);
+ Error = NvRmPowerModuleClockConfig(hRmSpiSlink->hDevice,
+ ModuleId, 0, PrefClockFreqInKHz,
+ NvRmFreqUnspecified, &PrefClockFreqInKHz,
+ 1, &ConfiguredClockFreqInKHz, 0);
+ if (Error)
+ return Error;
+
+ hRmSpiSlink->ClockFreqInKHz = ClockSpeedInKHz;
+ }
+
+ if (pDevInfo->SignalMode != hRmSpiSlink->HwRegs.CurrSignalMode)
+ hHwIntf->HwSetSignalModeFxn(&hRmSpiSlink->HwRegs, pDevInfo->SignalMode);
+
+ if (hRmSpiSlink->IsMasterMode != hRmSpiSlink->HwRegs.IsMasterMode)
+ hHwIntf->HwSetFunctionalModeFxn(&hRmSpiSlink->HwRegs, hRmSpiSlink->IsMasterMode);
+
+ IsHigh = (pDevInfo->ChipSelectActiveLow)? NV_FALSE: NV_TRUE;
+ hHwIntf->HwSetChipSelectLevelFxn(&hRmSpiSlink->HwRegs, ChipSelectId, IsHigh);
+ hRmSpiSlink->CurrTransferChipSelId = ChipSelectId;
+ }
+ else
+ {
+ IsHigh = (pDevInfo->ChipSelectActiveLow)? NV_TRUE: NV_FALSE;
+ hHwIntf->HwSetChipSelectLevelFxn(&hRmSpiSlink->HwRegs, ChipSelectId, IsHigh);
+ if (hRmSpiSlink->HwRegs.IdleSignalMode != hRmSpiSlink->HwRegs.CurrSignalMode)
+ hHwIntf->HwSetSignalModeFxn(&hRmSpiSlink->HwRegs, hRmSpiSlink->HwRegs.IdleSignalMode);
+ }
+ hRmSpiSlink->IsCurrentChipSelStateHigh[ChipSelectId] = IsHigh;
+ return NvSuccess;
+}
+
+
+/**
+ * Pack the client's byte stream into 32-bit SPI words for a master-mode
+ * transmit, MSB-first (first client byte lands in the most significant
+ * byte of each packet).
+ *
+ * pTxBuffer        Client source bytes.
+ * pSpiBuffer       Destination word buffer handed to the FIFO/DMA.
+ * BytesRequested   Total client bytes to convert.
+ * PacketBitLength  Bits per SPI packet (1..32); MSBMaskData trims the
+ *                  top partial byte when not a multiple of 8.
+ * IsPackedMode     Nonzero when the HW packed mode is in use (8/16-bit
+ *                  packets are tightly packed into words).
+ */
+static void
+MakeMasterSpiBufferFromClientBuffer(
+    NvU8 *pTxBuffer,
+    NvU32 *pSpiBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketBitLength,
+    NvU32 IsPackedMode)
+{
+    NvU32 Shift0;
+    NvU32 MSBMaskData = 0xFF;
+    NvU32 BytesPerPackets;
+    NvU32 Index;
+    NvU32 PacketRequest;
+
+    if (IsPackedMode)
+    {
+        // 8-bit packed packets need no byte reordering.
+        if (PacketBitLength == 8)
+        {
+            NvOsMemcpy(pSpiBuffer, pTxBuffer, BytesRequested);
+            return;
+        }
+
+        BytesPerPackets = (PacketBitLength + 7)/8;
+        PacketRequest = BytesRequested / BytesPerPackets;
+        // 16-bit packed: swap the two bytes of each packet (MSB first).
+        if (PacketBitLength == 16)
+        {
+            NvU16 *pOutBuffer = (NvU16 *)pSpiBuffer;
+            for (Index =0; Index < PacketRequest; ++Index)
+            {
+                *pOutBuffer++  = (NvU16)(((*(pTxBuffer  )) << 8) |
+                                            ((*(pTxBuffer+1))& 0xFF));
+                pTxBuffer += 2;
+            }
+            return;
+        }
+    }
+
+    // Non-packed (one packet per word) path.
+    BytesPerPackets = (PacketBitLength + 7)/8;
+    PacketRequest = BytesRequested / BytesPerPackets;
+
+    // Mask for the partial most-significant byte of each packet.
+    Shift0 = (PacketBitLength & 7);
+    if (Shift0)
+        MSBMaskData = (0xFF >> (8-Shift0));
+
+    if (BytesPerPackets == 1)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((*(pTxBuffer))& MSBMaskData);
+            pTxBuffer++;
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 2)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer))& MSBMaskData) << 8) |
+                                    ((*(pTxBuffer+1))));
+            pTxBuffer += 2;
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 3)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer)) & MSBMaskData) << 16) |
+                            ((*(pTxBuffer+1)) << 8) |
+                            ((*(pTxBuffer+2))));
+            pTxBuffer += 3;
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 4)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer))& MSBMaskData) << 24) |
+                            ((*(pTxBuffer+1)) << 16) |
+                            ((*(pTxBuffer+2)) << 8) |
+                            ((*(pTxBuffer+3))));
+            pTxBuffer += 4;
+        }
+        return;
+    }
+}
+
+// Similar to MakeMasterSpiBufferFromClientBuffer() except that SPI slave byte order
+// is reversed compared to SPI master: the first client byte occupies the LEAST
+// significant byte of each packet, so 8- and 16-bit packed transfers reduce to a
+// straight memcpy on a little-endian CPU.
+static void
+MakeSlaveSpiBufferFromClientBuffer(
+    NvU8 *pTxBuffer,
+    NvU32 *pSpiBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketBitLength,
+    NvU32 IsPackedMode)
+{
+    NvU32 Shift0;
+    NvU32 MSBMaskData = 0xFF;
+    NvU32 BytesPerPackets;
+    NvU32 Index;
+    NvU32 PacketRequest;
+
+    if (IsPackedMode)
+    {
+        /* SPI slave byte order matches processor endianness, so memcpy can be used */
+        if ((PacketBitLength == 8) || (PacketBitLength == 16))
+        {
+            NvOsMemcpy(pSpiBuffer, pTxBuffer, BytesRequested);
+            return;
+        }
+    }
+
+    BytesPerPackets = (PacketBitLength + 7)/8;
+    PacketRequest = BytesRequested / BytesPerPackets;
+
+    // Mask for the partial most-significant byte of each packet.
+    Shift0 = (PacketBitLength & 7);
+    if (Shift0)
+        MSBMaskData = (0xFF >> (8-Shift0));
+
+    if (BytesPerPackets == 1)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((*(pTxBuffer))& MSBMaskData);
+            pTxBuffer++;
+        }
+        return;
+    }
+
+    // Multi-byte packets: client bytes are assembled little-endian
+    // (byte 0 of the client buffer is the packet's LSB).
+    if (BytesPerPackets == 2)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer+1))& MSBMaskData) << 8) |
+                                    ((*(pTxBuffer))));
+            pTxBuffer += 2;
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 3)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer+2)) & MSBMaskData) << 16) |
+                            ((*(pTxBuffer+1)) << 8) |
+                            ((*(pTxBuffer))));
+            pTxBuffer += 3;
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 4)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            *pSpiBuffer++ = (NvU32)((((*(pTxBuffer+3))& MSBMaskData) << 24) |
+                            ((*(pTxBuffer+2)) << 16) |
+                            ((*(pTxBuffer+1)) << 8) |
+                            ((*(pTxBuffer))));
+            pTxBuffer += 4;
+        }
+        return;
+    }
+}
+
+/**
+ * Unpack received 32-bit SPI words into the client's byte buffer for a
+ * master-mode receive: the inverse of MakeMasterSpiBufferFromClientBuffer().
+ * Each packet's most significant byte is emitted first, with MSBMaskData
+ * trimming the partial top byte when PacketBitLength is not a multiple of 8.
+ */
+static void
+MakeMasterClientBufferFromSpiBuffer(
+    NvU8 *pRxBuffer,
+    NvU32 *pSpiBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketBitLength,
+    NvU32 IsPackedMode)
+{
+    NvU32 Shift0;
+    NvU32 MSBMaskData = 0xFF;
+    NvU32 BytesPerPackets;
+    NvU32 Index;
+    NvU32 RxData;
+    NvU32 PacketRequest;
+
+    NvU8 *pOutBuffer = NULL;
+
+    if (IsPackedMode)
+    {
+        // 8-bit packed packets need no byte reordering.
+        if (PacketBitLength == 8)
+        {
+            NvOsMemcpy(pRxBuffer, pSpiBuffer, BytesRequested);
+            return;
+        }
+
+        BytesPerPackets = (PacketBitLength + 7)/8;
+        PacketRequest = BytesRequested / BytesPerPackets;
+        // 16-bit packed: swap the two bytes of each packet back.
+        if (PacketBitLength == 16)
+        {
+            pOutBuffer = (NvU8 *)pSpiBuffer;
+            for (Index =0; Index < PacketRequest; ++Index)
+            {
+                *pRxBuffer++  = (NvU8) (*(pOutBuffer+1));
+                *pRxBuffer++  = (NvU8) (*(pOutBuffer));
+                pOutBuffer += 2;
+            }
+            return;
+        }
+    }
+
+    // Non-packed (one packet per word) path.
+    BytesPerPackets = (PacketBitLength + 7)/8;
+    PacketRequest = BytesRequested / BytesPerPackets;
+    Shift0 = (PacketBitLength & 7);
+    if (Shift0)
+        MSBMaskData = (0xFF >> (8-Shift0));
+
+    if (BytesPerPackets == 1)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+            *pRxBuffer++ = (NvU8)((*pSpiBuffer++) & MSBMaskData);
+        return;
+    }
+
+    if (BytesPerPackets == 2)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData >> 8) & MSBMaskData);
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 3)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData >> 16)& MSBMaskData);
+            *pRxBuffer++ = (NvU8)((RxData >> 8)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 4)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData >> 24)& MSBMaskData);
+            *pRxBuffer++ = (NvU8)((RxData >> 16)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 8)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+        }
+        return;
+    }
+}
+
+// Similar to MakeMasterClientBufferFromSpiBuffer() except that SPI slave byte order
+// is reversed compared to SPI master: each packet's LEAST significant byte is
+// emitted to the client first, so 8- and 16-bit packed transfers reduce to a
+// straight memcpy on a little-endian CPU.
+static void
+MakeSlaveClientBufferFromSpiBuffer(
+    NvU8 *pRxBuffer,
+    NvU32 *pSpiBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketBitLength,
+    NvU32 IsPackedMode)
+{
+    NvU32 Shift0;
+    NvU32 MSBMaskData = 0xFF;
+    NvU32 BytesPerPackets;
+    NvU32 Index;
+    NvU32 RxData;
+    NvU32 PacketRequest;
+
+    if (IsPackedMode)
+    {
+        /* SPI slave byte order matches processor endianness, so memcpy can be used */
+        if ((PacketBitLength == 8) || (PacketBitLength == 16))
+        {
+            NvOsMemcpy(pRxBuffer, pSpiBuffer, BytesRequested);
+            return;
+        }
+    }
+
+    BytesPerPackets = (PacketBitLength + 7)/8;
+    PacketRequest = BytesRequested / BytesPerPackets;
+    // Mask for the partial most-significant byte of each packet.
+    Shift0 = (PacketBitLength & 7);
+    if (Shift0)
+        MSBMaskData = (0xFF >> (8-Shift0));
+
+    if (BytesPerPackets == 1)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+            *pRxBuffer++ = (NvU8)((*pSpiBuffer++) & MSBMaskData);
+        return;
+    }
+
+    // Multi-byte packets: emit bytes LSB-first (little-endian packet layout).
+    if (BytesPerPackets == 2)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 8) & MSBMaskData);
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 3)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 8)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 16)& MSBMaskData);
+        }
+        return;
+    }
+
+    if (BytesPerPackets == 4)
+    {
+        for (Index = 0; Index < PacketRequest; ++Index)
+        {
+            RxData = *pSpiBuffer++;
+            *pRxBuffer++ = (NvU8)((RxData) & 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 8)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 16)& 0xFF);
+            *pRxBuffer++ = (NvU8)((RxData >> 24)& MSBMaskData);
+        }
+        return;
+    }
+}
+
+/**
+ * Master-mode transfer using CPU (PIO) FIFO access, no DMA.
+ *
+ * Splits the request into chunks bounded by the CPU bounce buffer size,
+ * packs client Tx bytes into words, writes them to the transmit FIFO,
+ * starts the transfer, waits for completion (polling for small transfers,
+ * interrupt-driven otherwise), then unpacks received words back to the
+ * client Rx buffer. Either pClientRxBuffer or pClientTxBuffer may be NULL
+ * for a one-directional transfer.
+ *
+ * On exit *pPacketsTransferred holds the number of packets actually moved,
+ * even when an error terminated the loop early.
+ */
+static NvError
+MasterModeReadWriteCpu(
+    NvRmSpiHandle hRmSpiSlink,
+    NvU8 *pClientRxBuffer,
+    NvU8 *pClientTxBuffer,
+    NvU32 PacketsRequested,
+    NvU32 *pPacketsTransferred,
+    NvU32 IsPackedMode,
+    NvU32 PacketBitLength)
+{
+    NvError Error = NvSuccess;
+    NvU32 CurrentTransWord;
+    NvU32 BufferOffset = 0;
+    NvU32 WordsWritten;
+    NvU32 MaxPacketPerTrans;
+    NvU32 CurrentTransPacket;
+    NvU32 PacketsPerWord;
+    NvU32 MaxPacketTrans;
+    NvBool IsPolling;
+
+    hRmSpiSlink->CurrTransInfo.BytesPerPacket = (PacketBitLength + 7)/8;
+    PacketsPerWord =  (IsPackedMode)? 4/hRmSpiSlink->CurrTransInfo.BytesPerPacket:1;
+
+    hRmSpiSlink->IsUsingApbDma = NV_FALSE;
+    hRmSpiSlink->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+
+    // Chunk size is limited by the CPU bounce buffer capacity.
+    MaxPacketPerTrans = hRmSpiSlink->CpuBufferSizeInWords*PacketsPerWord;
+    hRmSpiSlink->CurrTransInfo.TotalPacketsRemaining = PacketsRequested;
+
+    while (hRmSpiSlink->CurrTransInfo.TotalPacketsRemaining)
+    {
+        MaxPacketTrans = NV_MIN(hRmSpiSlink->CurrTransInfo.TotalPacketsRemaining, MaxPacketPerTrans);
+
+
+        // If hw does not support the nonword alined packed mode then
+        // Transfer the nearest word alligned packet first with packed mode
+        // and then the remaining packet in non packed mode.
+        if (hRmSpiSlink->HwRegs.IsNonWordAlignedPackModeSupported)
+            CurrentTransWord = (MaxPacketTrans + PacketsPerWord -1)/PacketsPerWord;
+        else
+            CurrentTransWord = (MaxPacketTrans)/PacketsPerWord;
+
+        // Fewer packets than one packed word left: fall back to
+        // non-packed mode (one packet per word) for the tail.
+        if (!CurrentTransWord)
+        {
+            PacketsPerWord = 1;
+            CurrentTransWord = MaxPacketTrans;
+            hRmSpiSlink->hHwInterface->HwSetPacketLengthFxn(&hRmSpiSlink->HwRegs,
+                PacketBitLength, NV_FALSE);
+            hRmSpiSlink->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+            IsPackedMode = NV_FALSE;
+        }
+
+        CurrentTransPacket = NV_MIN(MaxPacketTrans, CurrentTransWord*PacketsPerWord) ;
+
+        // Select polling if less number of transfer is required.
+        if (CurrentTransWord < SLINK_POLLING_HIGH_THRESOLD)
+        {
+            IsPolling = NV_TRUE;
+            hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+                hRmSpiSlink->CurrentDirection, NV_FALSE);
+        }
+        else
+        {
+            IsPolling = NV_FALSE;
+            hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+                    hRmSpiSlink->CurrentDirection, NV_TRUE);
+        }
+        // Reset per-chunk status shared with the ISR/poll path.
+        hRmSpiSlink->TxTransferStatus = NvSuccess;
+        hRmSpiSlink->RxTransferStatus = NvSuccess;
+        hRmSpiSlink->CurrTransInfo.PacketTransferred = 0;
+
+        if (pClientRxBuffer)
+        {
+            hRmSpiSlink->CurrTransInfo.pRxBuff = hRmSpiSlink->pRxCpuBuffer;
+            hRmSpiSlink->CurrTransInfo.RxPacketsRemaining = CurrentTransPacket;
+        }
+
+        if (pClientTxBuffer)
+        {
+            // Pack this chunk and pre-fill the Tx FIFO; the ISR/poll loop
+            // continues from pTxBuff for whatever did not fit.
+            MakeMasterSpiBufferFromClientBuffer(pClientTxBuffer + BufferOffset,
+                hRmSpiSlink->pTxCpuBuffer, CurrentTransPacket*hRmSpiSlink->CurrTransInfo.BytesPerPacket,
+                PacketBitLength, IsPackedMode);
+            WordsWritten = hRmSpiSlink->hHwInterface->HwWriteInTransmitFifoFxn(
+                                &hRmSpiSlink->HwRegs, hRmSpiSlink->pTxCpuBuffer, CurrentTransWord);
+
+            hRmSpiSlink->CurrTransInfo.CurrPacketCount =
+                            NV_MIN(WordsWritten*PacketsPerWord, CurrentTransPacket);
+
+            hRmSpiSlink->CurrTransInfo.pTxBuff =
+                                hRmSpiSlink->pTxCpuBuffer + WordsWritten;
+            hRmSpiSlink->CurrTransInfo.TxPacketsRemaining = CurrentTransPacket -
+                                            hRmSpiSlink->CurrTransInfo.CurrPacketCount;
+        }
+        else
+        {
+            hRmSpiSlink->CurrTransInfo.CurrPacketCount =
+                        NV_MIN(hRmSpiSlink->HwRegs.MaxWordTransfer*PacketsPerWord,
+                            CurrentTransPacket);
+        }
+        hRmSpiSlink->hHwInterface->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs,
+                        hRmSpiSlink->CurrTransInfo.CurrPacketCount);
+        hRmSpiSlink->hHwInterface->HwStartTransferFxn(&hRmSpiSlink->HwRegs, NV_TRUE);
+#if NV_OAL
+        OalMasterSpiSlinkPoll(hRmSpiSlink);
+#else
+        WaitForTransferCompletion(hRmSpiSlink, NV_WAIT_INFINITE, IsPolling);
+#endif
+        // Rx status takes precedence when both directions report an error.
+        Error = (hRmSpiSlink->RxTransferStatus)? hRmSpiSlink->RxTransferStatus:
+                            hRmSpiSlink->TxTransferStatus;
+        if (Error)
+            break;
+
+        if (pClientRxBuffer)
+        {
+            MakeMasterClientBufferFromSpiBuffer(pClientRxBuffer + BufferOffset,
+                hRmSpiSlink->pRxCpuBuffer, CurrentTransPacket*hRmSpiSlink->CurrTransInfo.BytesPerPacket,
+                PacketBitLength, IsPackedMode);
+        }
+
+        BufferOffset += CurrentTransPacket*hRmSpiSlink->CurrTransInfo.BytesPerPacket;
+        hRmSpiSlink->CurrTransInfo.TotalPacketsRemaining -= CurrentTransPacket;
+    }
+
+    *pPacketsTransferred = PacketsRequested - hRmSpiSlink->CurrTransInfo.TotalPacketsRemaining;
+    return Error;
+}
+
+/**
+ * Master-mode transfer using APB DMA for FIFO fill/drain.
+ *
+ * Splits the request into chunks bounded by the DMA bounce buffer. A tail
+ * that is smaller than one packed word is delegated to
+ * MasterModeReadWriteCpu() in non-packed mode. Tx data is packed into the
+ * DMA buffer and either streamed by DMA (large chunks) or written directly
+ * to the FIFO (chunks smaller than the FIFO); Rx uses a second DMA channel.
+ * Interrupt sources stay enabled for the whole routine and are disabled,
+ * together with the data-flow direction, before returning.
+ *
+ * NOTE(review): the "wait till fifo full" loops below spin unbounded with
+ * no timeout; if the DMA stalls this never exits — confirm acceptable here.
+ */
+static NvError MasterModeReadWriteDma(
+    NvRmSpiHandle hRmSpiSlink,
+    NvU8 *pClientRxBuffer,
+    NvU8 *pClientTxBuffer,
+    NvU32 PacketsRequested,
+    NvU32 *pPacketsTransferred,
+    NvU32 IsPackedMode,
+    NvU32 PacketBitLength)
+{
+    NvError Error = NvSuccess;
+    NvU32 CurrentTransWord;
+    NvU32 BufferOffset = 0;
+    NvU32 BytesPerPacket = (PacketBitLength +7)/8;
+    NvU32 MaxPacketPerTrans;
+    NvU32 CurrentTransPacket;
+    NvU32 PacketsRemaining;
+    NvU32 PacketsPerWord = (IsPackedMode)?4/BytesPerPacket:1;
+    NvU32 TriggerLevel;
+    NvU32 MaxPacketTransPossible;
+    NvU32 PackSend = 0;
+    NvU8 *pReadReqCpuBuffer = NULL;
+    NvU8 *pWriteReqCpuBuffer = NULL;
+    NvU32 WrittenWord;
+
+    hRmSpiSlink->IsUsingApbDma = NV_TRUE;
+    hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+        hRmSpiSlink->CurrentDirection, NV_TRUE);
+
+    hRmSpiSlink->CurrTransInfo.PacketsPerWord =  PacketsPerWord;
+
+
+    // Chunk size is limited by the DMA bounce buffer (words * packets/word).
+    MaxPacketPerTrans = (hRmSpiSlink->DmaBufferSize >> 2)*PacketsPerWord;
+    PacketsRemaining = PacketsRequested;
+    while (PacketsRemaining)
+    {
+        MaxPacketTransPossible = NV_MIN(PacketsRemaining, MaxPacketPerTrans);
+
+        // If hw does not support the nonword alined packed mode then
+        // Transfer the nearest word alligned packet first with packed mode
+        // and then the remaining packet in non packed mode.
+        if (hRmSpiSlink->HwRegs.IsNonWordAlignedPackModeSupported)
+            CurrentTransWord = (MaxPacketTransPossible + PacketsPerWord -1)/PacketsPerWord;
+        else
+            CurrentTransWord = (MaxPacketTransPossible)/PacketsPerWord;
+
+        // For the non multiple of the 4 bytes, it can do the transfer using the
+        // cpu for the remaining transfer.
+        if (!CurrentTransWord)
+        {
+            if (pClientRxBuffer)
+                pReadReqCpuBuffer = (pClientRxBuffer + BufferOffset);
+            if (pClientTxBuffer)
+                pWriteReqCpuBuffer = (pClientTxBuffer + BufferOffset);
+
+            hRmSpiSlink->hHwInterface->HwSetPacketLengthFxn(&hRmSpiSlink->HwRegs,
+                        PacketBitLength, NV_FALSE);
+            Error = MasterModeReadWriteCpu(hRmSpiSlink, pReadReqCpuBuffer,
+                        pWriteReqCpuBuffer, MaxPacketTransPossible,
+                        &PackSend, NV_FALSE, PacketBitLength);
+            PacketsRemaining -= PackSend;
+            break;
+        }
+        if (hRmSpiSlink->HwRegs.IsNonWordAlignedPackModeSupported)
+            CurrentTransPacket = MaxPacketTransPossible;
+        else
+            CurrentTransPacket = CurrentTransWord*PacketsPerWord;
+
+        // Reset per-chunk status shared with the ISR.
+        hRmSpiSlink->TxTransferStatus = NvSuccess;
+        hRmSpiSlink->RxTransferStatus = NvSuccess;
+        hRmSpiSlink->CurrTransInfo.PacketTransferred = 0;
+        hRmSpiSlink->CurrTransInfo.CurrPacketCount = CurrentTransPacket;
+
+        if (pClientRxBuffer)
+            hRmSpiSlink->CurrTransInfo.RxPacketsRemaining = CurrentTransPacket;
+
+        hRmSpiSlink->hHwInterface->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs,
+            CurrentTransPacket);
+
+        // Non-multiple-of-4 word counts need the smaller FIFO trigger level.
+        TriggerLevel = (CurrentTransWord & 0x3)? 4: 16;
+        hRmSpiSlink->hHwInterface->HwSetTriggerLevelFxn(&hRmSpiSlink->HwRegs,
+            SerialHwFifo_Both , TriggerLevel);
+
+        if (pClientTxBuffer)
+        {
+            MakeMasterSpiBufferFromClientBuffer(pClientTxBuffer + BufferOffset,
+                hRmSpiSlink->pTxDmaBuffer, CurrentTransPacket*BytesPerPacket,
+                PacketBitLength, IsPackedMode);
+            hRmSpiSlink->CurrTransInfo.pTxBuff = hRmSpiSlink->pTxDmaBuffer;
+            hRmSpiSlink->TxDmaReq.TransferSize = CurrentTransWord *4;
+
+            // If transfer word is more than fifo size the use the dma
+            // otherwise direct write into the fifo.
+            if (CurrentTransWord >= hRmSpiSlink->HwRegs.MaxWordTransfer)
+            {
+                Error = NvRmDmaStartDmaTransfer(hRmSpiSlink->hRmTxDma,
+                            &hRmSpiSlink->TxDmaReq, NvRmDmaDirection_Forward, 0, NULL);
+                // Wait till fifo full if the transfer size is more than fifo size
+                if (!Error)
+                {
+                    do
+                    {
+                        if (hRmSpiSlink->hHwInterface->HwIsTransmitFifoFull(&hRmSpiSlink->HwRegs))
+                            break;
+                    } while(1);
+                }
+            }
+            else
+            {
+                WrittenWord = hRmSpiSlink->hHwInterface->HwWriteInTransmitFifoFxn(
+                                    &hRmSpiSlink->HwRegs,
+                                    hRmSpiSlink->CurrTransInfo.pTxBuff,
+                                    CurrentTransWord);
+                // A short write here means the FIFO rejected words it should
+                // have accepted; surface it as a timeout.
+                if (WrittenWord != CurrentTransWord)
+                {
+                    NV_ASSERT(WrittenWord == CurrentTransWord);
+                    Error = NvError_Timeout;
+                }
+            }
+        }
+
+        if ((!Error) && (pClientRxBuffer))
+        {
+            hRmSpiSlink->RxDmaReq.TransferSize = CurrentTransWord *4;
+            Error = NvRmDmaStartDmaTransfer(hRmSpiSlink->hRmRxDma, &hRmSpiSlink->RxDmaReq,
+                        NvRmDmaDirection_Forward, 0, NULL);
+            // Roll back the already-started Tx DMA if Rx start failed.
+            if ((Error) && (pClientTxBuffer))
+                NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+        }
+
+        if (!Error)
+            hRmSpiSlink->hHwInterface->HwStartTransferFxn(&hRmSpiSlink->HwRegs, NV_TRUE);
+
+        if (!Error)
+            WaitForTransferCompletion(hRmSpiSlink, NV_WAIT_INFINITE, NV_FALSE);
+
+        // Rx status takes precedence when both directions report an error.
+        Error = (hRmSpiSlink->RxTransferStatus)? hRmSpiSlink->RxTransferStatus:
+                    hRmSpiSlink->TxTransferStatus;
+        if (Error)
+        {
+            if (pClientRxBuffer)
+                NvRmDmaAbort(hRmSpiSlink->hRmRxDma);
+            if (pClientTxBuffer)
+                NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+            break;
+        }
+        if (pClientRxBuffer)
+        {
+            MakeMasterClientBufferFromSpiBuffer(pClientRxBuffer + BufferOffset,
+                hRmSpiSlink->pRxDmaBuffer, CurrentTransPacket*BytesPerPacket,
+                PacketBitLength, IsPackedMode);
+        }
+
+        BufferOffset += CurrentTransPacket*BytesPerPacket;
+        PacketsRemaining -= CurrentTransPacket;
+    }
+
+    // Quiesce data flow and interrupts regardless of success or failure.
+    hRmSpiSlink->hHwInterface->HwSetDataFlowFxn(&hRmSpiSlink->HwRegs,
+                        hRmSpiSlink->CurrentDirection, NV_FALSE);
+    hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+            hRmSpiSlink->CurrentDirection, NV_FALSE);
+
+    *pPacketsTransferred =  PacketsRequested - PacketsRemaining;
+    return Error;
+}
+/**
+ * Start (but do not wait for) a slave-mode transfer using CPU FIFO access.
+ *
+ * Programs the packet length, primes the per-transfer bookkeeping in
+ * CurrTransInfo, pre-fills the Tx FIFO when pClientTxBuffer is non-NULL,
+ * sets the HW transfer size, and kicks off the transfer. Completion and
+ * Rx unpacking happen later in SlaveModeSpiCompleteReadWrite().
+ */
+static NvError SlaveModeSpiStartReadWriteCpu(
+    NvRmSpiHandle hRmSpiSlink,
+    NvBool IsReadTransfer,
+    NvU8 *pClientTxBuffer,
+    NvU32 PacketsRequested,
+    NvU32 IsPackedMode,
+    NvU32 PacketBitLength)
+{
+    NvError Error = NvSuccess;
+    NvU32 BytesPerPacket;
+    NvU32 WordsWritten;
+    NvU32 PacketsPerWord;
+    NvU32 TotalWordsRequested;
+
+    BytesPerPacket = (PacketBitLength + 7)/8;
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPacket: 1;
+    TotalWordsRequested = (PacketsRequested + PacketsPerWord -1)/PacketsPerWord;
+
+    hRmSpiSlink->IsUsingApbDma = NV_FALSE;
+
+    hRmSpiSlink->hHwInterface->HwSetPacketLengthFxn(&hRmSpiSlink->HwRegs,
+        PacketBitLength, IsPackedMode);
+
+    // Record transfer geometry for the ISR and the completion routine.
+    hRmSpiSlink->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+    hRmSpiSlink->CurrTransInfo.BytesPerPacket = BytesPerPacket;
+    hRmSpiSlink->CurrTransInfo.PacketBitLength = PacketBitLength;
+    hRmSpiSlink->CurrTransInfo.IsPackedMode = IsPackedMode;
+
+    hRmSpiSlink->TxTransferStatus = NvSuccess;
+    hRmSpiSlink->RxTransferStatus = NvSuccess;
+
+    hRmSpiSlink->CurrTransInfo.PacketTransferred = 0;
+
+    // Rx side drains into the CPU bounce buffer only when a read was asked.
+    hRmSpiSlink->CurrTransInfo.pRxBuff =
+            (IsReadTransfer)? hRmSpiSlink->pRxCpuBuffer: NULL;
+    hRmSpiSlink->CurrTransInfo.RxPacketsRemaining =
+            (IsReadTransfer)? PacketsRequested: 0;
+
+    hRmSpiSlink->CurrTransInfo.PacketRequested = PacketsRequested;
+
+    hRmSpiSlink->CurrTransInfo.pTxBuff = NULL;
+    hRmSpiSlink->CurrTransInfo.TxPacketsRemaining = 0;
+
+    // Default first-burst size when there is no Tx data to pre-fill.
+    WordsWritten = hRmSpiSlink->HwRegs.MaxWordTransfer;
+
+    if (pClientTxBuffer)
+    {
+        // Pack the whole request and pre-fill the FIFO; the ISR continues
+        // from pTxBuff with whatever did not fit.
+        MakeSlaveSpiBufferFromClientBuffer(pClientTxBuffer, hRmSpiSlink->pTxCpuBuffer,
+            PacketsRequested*BytesPerPacket, PacketBitLength,
+            IsPackedMode);
+        WordsWritten = hRmSpiSlink->hHwInterface->HwWriteInTransmitFifoFxn(
+                            &hRmSpiSlink->HwRegs, hRmSpiSlink->pTxCpuBuffer,
+                            TotalWordsRequested);
+
+        hRmSpiSlink->CurrTransInfo.CurrPacketCount =
+                        NV_MIN(WordsWritten*PacketsPerWord, PacketsRequested);
+        hRmSpiSlink->CurrTransInfo.pTxBuff =
+                            hRmSpiSlink->pTxCpuBuffer + WordsWritten;
+        hRmSpiSlink->CurrTransInfo.TxPacketsRemaining = PacketsRequested -
+                                        hRmSpiSlink->CurrTransInfo.CurrPacketCount;
+    }
+    else
+    {
+        hRmSpiSlink->CurrTransInfo.CurrPacketCount =
+                        NV_MIN(WordsWritten*PacketsPerWord, PacketsRequested);
+    }
+
+    hRmSpiSlink->hHwInterface->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs,
+                    hRmSpiSlink->CurrTransInfo.CurrPacketCount);
+
+    hRmSpiSlink->hHwInterface->HwStartTransferFxn(&hRmSpiSlink->HwRegs, NV_TRUE);
+
+    return Error;
+}
+
+/**
+ * Start (but do not wait for) a slave-mode transfer using APB DMA.
+ *
+ * Grows the shared Rx/Tx DMA bounce buffer if the request exceeds its
+ * current size, primes CurrTransInfo, starts the Tx DMA (waiting for the
+ * FIFO to fill) and the Rx DMA as requested, then kicks off the transfer.
+ * Completion and Rx unpacking happen in SlaveModeSpiCompleteReadWrite().
+ *
+ * NOTE(review): after DestroyDmaTransferBuffer() only the Rx pointers are
+ * cleared (hRmMemory/pRxDmaBuffer/DmaRxBuffPhysAdd); pTxDmaBuffer and
+ * DmaTxBuffPhysAdd keep stale values until the realloc succeeds — if
+ * CreateDmaTransferBuffer() fails they remain dangling. Also the FIFO-full
+ * wait loop spins with no timeout. Confirm both are acceptable.
+ */
+static NvError SlaveModeSpiStartReadWriteDma(
+    NvRmSpiHandle hRmSpiSlink,
+    NvBool IsReadTransfer,
+    NvU8 *pClientTxBuffer,
+    NvU32 PacketsRequested,
+    NvU32 IsPackedMode,
+    NvU32 PacketBitLength)
+{
+    NvError Error = NvSuccess;
+    NvU32 CurrentTransWord;
+    NvU32 BytesPerPacket;
+    NvU32 CurrentTransPacket;
+    NvU32 PacketsPerWord;
+    NvU32 TriggerLevel;
+    NvU32 TotalWordsRequested;
+    NvU32 NewBufferSize;
+
+    BytesPerPacket = (PacketBitLength + 7)/8;
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPacket: 1;
+    TotalWordsRequested = (PacketsRequested + PacketsPerWord -1)/PacketsPerWord;
+
+    hRmSpiSlink->IsUsingApbDma = NV_TRUE;
+
+    // Create the buffer if the required size of the buffer is not available.
+    if (hRmSpiSlink->DmaBufferSize < (TotalWordsRequested*4))
+    {
+        DestroyDmaTransferBuffer(hRmSpiSlink->hRmMemory,
+                    hRmSpiSlink->pRxDmaBuffer, hRmSpiSlink->pTxDmaBuffer,
+                    hRmSpiSlink->DmaBufferSize);
+        hRmSpiSlink->hRmMemory = NULL;
+        hRmSpiSlink->pRxDmaBuffer = NULL;
+        hRmSpiSlink->DmaRxBuffPhysAdd = 0;
+
+        // Better to findout the neearest 2powern
+        NewBufferSize = NV_MAX(hRmSpiSlink->DmaBufferSize, (TotalWordsRequested*4));
+        Error = CreateDmaTransferBuffer(hRmSpiSlink->hDevice, &hRmSpiSlink->hRmMemory,
+                    &hRmSpiSlink->DmaRxBuffPhysAdd, (void **)&hRmSpiSlink->pRxDmaBuffer,
+                    &hRmSpiSlink->DmaTxBuffPhysAdd, (void **)&hRmSpiSlink->pTxDmaBuffer,
+                    NewBufferSize);
+
+        if (Error)
+        {
+            hRmSpiSlink->DmaBufferSize = 0;
+            return Error;
+        }
+        // Re-point the standing DMA request descriptors at the new buffers.
+        hRmSpiSlink->RxDmaReq.DestinationBufferPhyAddress = hRmSpiSlink->DmaRxBuffPhysAdd;
+        hRmSpiSlink->TxDmaReq.SourceBufferPhyAddress = hRmSpiSlink->DmaTxBuffPhysAdd;
+        hRmSpiSlink->DmaBufferSize = NewBufferSize;
+    }
+
+    // Record transfer geometry for the ISR and the completion routine.
+    hRmSpiSlink->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+    hRmSpiSlink->CurrTransInfo.BytesPerPacket = BytesPerPacket;
+    hRmSpiSlink->CurrTransInfo.PacketBitLength = PacketBitLength;
+    hRmSpiSlink->CurrTransInfo.IsPackedMode = IsPackedMode;
+
+    CurrentTransPacket = NV_MIN((TotalWordsRequested*PacketsPerWord), PacketsRequested);
+
+    hRmSpiSlink->CurrTransInfo.PacketTransferred = 0;
+    hRmSpiSlink->CurrTransInfo.RxPacketsRemaining = 0;
+    hRmSpiSlink->CurrTransInfo.pRxBuff = NULL;
+    hRmSpiSlink->CurrTransInfo.CurrPacketCount = CurrentTransPacket;
+    hRmSpiSlink->CurrTransInfo.PacketRequested = CurrentTransPacket;
+    hRmSpiSlink->TxTransferStatus = NvSuccess;
+    hRmSpiSlink->RxTransferStatus = NvSuccess;
+
+    hRmSpiSlink->CurrTransInfo.pTxBuff = NULL;
+
+    CurrentTransWord = (CurrentTransPacket + PacketsPerWord -1)/PacketsPerWord;
+
+    // Non-multiple-of-4 word counts need the smaller FIFO trigger level.
+    TriggerLevel = (CurrentTransWord & 0x3)? 4: 16;
+    hRmSpiSlink->hHwInterface->HwSetTriggerLevelFxn(&hRmSpiSlink->HwRegs,
+        SerialHwFifo_Both , TriggerLevel);
+
+    hRmSpiSlink->hHwInterface->HwSetDmaTransferSizeFxn(&hRmSpiSlink->HwRegs,
+        CurrentTransPacket);
+    if (pClientTxBuffer)
+    {
+        MakeSlaveSpiBufferFromClientBuffer(pClientTxBuffer, hRmSpiSlink->pTxDmaBuffer,
+                                CurrentTransPacket*BytesPerPacket,
+                                PacketBitLength, IsPackedMode);
+        hRmSpiSlink->CurrTransInfo.pTxBuff = hRmSpiSlink->pTxDmaBuffer;
+        hRmSpiSlink->TxDmaReq.TransferSize = CurrentTransWord *4;
+        Error = NvRmDmaStartDmaTransfer(hRmSpiSlink->hRmTxDma, &hRmSpiSlink->TxDmaReq,
+                    NvRmDmaDirection_Forward, 0, NULL);
+        // Wait for the DMA to fill the Tx FIFO before the master may clock us.
+        do
+        {
+            if (hRmSpiSlink->hHwInterface->HwIsTransmitFifoFull(&hRmSpiSlink->HwRegs))
+                break;
+        } while(1);
+    }
+
+    if ((!Error) && (IsReadTransfer))
+    {
+        hRmSpiSlink->RxDmaReq.TransferSize = CurrentTransWord *4;
+        hRmSpiSlink->CurrTransInfo.RxPacketsRemaining = CurrentTransPacket;
+        hRmSpiSlink->CurrTransInfo.pRxBuff = hRmSpiSlink->pRxDmaBuffer;
+
+        Error = NvRmDmaStartDmaTransfer(hRmSpiSlink->hRmRxDma, &hRmSpiSlink->RxDmaReq,
+                    NvRmDmaDirection_Forward, 0, NULL);
+        // Roll back the already-started Tx DMA if Rx start failed.
+        if ((Error) && (pClientTxBuffer))
+            NvRmDmaAbort(hRmSpiSlink->hRmTxDma);
+    }
+
+    if (!Error)
+        hRmSpiSlink->hHwInterface->HwStartTransferFxn(&hRmSpiSlink->HwRegs, NV_TRUE);
+    return Error;
+}
+
+/**
+ * Wait for a slave-mode transfer started by SlaveModeSpiStartReadWrite*()
+ * to finish, unpack any received data to the client, and quiesce the HW.
+ *
+ * A timeout is not treated as fatal here: the packets transferred so far
+ * are still returned to the caller (with the NvError_Timeout status).
+ * *pBytesTransferred receives the byte count actually moved, clamped to
+ * the originally requested packet count.
+ */
+static NvError SlaveModeSpiCompleteReadWrite(
+    NvRmSpiHandle hRmSpiSlink,
+    NvU8 *pClientRxBuffer,
+    NvU32 *pBytesTransferred,
+    NvU32 TimeoutMs)
+{
+    NvError Error = NvSuccess;
+    NvU32 TransferdPacket;
+    NvU32 ReqSizeInBytes;
+    NvU32 *pRxBuffer = NULL;
+
+    Error = WaitForTransferCompletion(hRmSpiSlink, TimeoutMs, NV_FALSE);
+    if (Error == NvError_Timeout)
+    {
+        // Partial transfer: report what the ISR managed to move.
+        TransferdPacket = hRmSpiSlink->CurrTransInfo.PacketTransferred;
+    }
+    else
+    {
+        // Rx status takes precedence when both directions report an error.
+        Error = (hRmSpiSlink->RxTransferStatus)? hRmSpiSlink->RxTransferStatus:
+                                hRmSpiSlink->TxTransferStatus;
+        if (Error)
+            TransferdPacket = hRmSpiSlink->CurrTransInfo.PacketTransferred;
+        else
+            TransferdPacket = hRmSpiSlink->CurrTransInfo.PacketRequested;
+    }
+    ReqSizeInBytes = NV_MIN(TransferdPacket, hRmSpiSlink->CurrTransInfo.PacketRequested)
+                                *hRmSpiSlink->CurrTransInfo.BytesPerPacket;
+
+    if (pClientRxBuffer)
+    {
+        // Source bounce buffer depends on which path started the transfer.
+        pRxBuffer = (hRmSpiSlink->IsUsingApbDma)?hRmSpiSlink->pRxDmaBuffer:
+                            hRmSpiSlink->pRxCpuBuffer;
+        MakeSlaveClientBufferFromSpiBuffer(pClientRxBuffer,
+                            pRxBuffer, ReqSizeInBytes,
+                            hRmSpiSlink->CurrTransInfo.PacketBitLength,
+                            hRmSpiSlink->CurrTransInfo.IsPackedMode);
+    }
+
+    *pBytesTransferred = ReqSizeInBytes;
+    // Disable interrupts and data flow, and mark the channel idle.
+    hRmSpiSlink->hHwInterface->HwSetInterruptSourceFxn(&hRmSpiSlink->HwRegs,
+            hRmSpiSlink->CurrentDirection, NV_FALSE);
+    hRmSpiSlink->hHwInterface->HwSetDataFlowFxn(&hRmSpiSlink->HwRegs,
+        hRmSpiSlink->CurrentDirection, NV_FALSE);
+    hRmSpiSlink->CurrentDirection = SerialHwDataFlow_None;
+    return Error;
+}
+
+
+/**
+ * Populate the slink HW-interface function table for this chip.
+ *
+ * Queries the RM module capability list to pick the SLINK controller
+ * version (1.0 = AP15, 1.1 = AP20) and layers the version-specific
+ * function pointers over the common slink interface.
+ *
+ * NOTE(review): the pSlinkInterface parameter is unused; the function
+ * writes to the file-scope s_SlinkHwInterface instead.
+ */
+static void
+InitSlinkInterface(
+    NvRmDeviceHandle hDevice,
+    HwInterface *pSlinkInterface)
+{
+    static SlinkCapabilities s_SpiCap[2];
+    SlinkCapabilities *pSpiCap = NULL;
+    static NvRmModuleCapability s_SpiCapList[] =
+        {
+            {1, 0, 0, &s_SpiCap[0]}, // AP15 version 1.0
+            {1, 1, 0, &s_SpiCap[1]}, // AP20 version 1.1
+        };
+
+    // (AP15) version 1.0
+    s_SpiCap[0].MajorVersion = 1;
+    s_SpiCap[0].MinorVersion = 0;
+
+    // (AP20) version 1.1
+    s_SpiCap[1].MajorVersion = 1;
+    s_SpiCap[1].MinorVersion = 1;
+
+    NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(hDevice, NVRM_MODULE_ID(NvRmModuleID_Slink, 0),
+                      s_SpiCapList, NV_ARRAY_SIZE(s_SpiCapList), (void**)&(pSpiCap)));
+
+    // Install common entry points first, then the version-specific overrides.
+    NvRmPrivSpiSlinkInitSlinkInterface(&s_SlinkHwInterface);
+    if ((pSpiCap->MajorVersion == 1) && (pSpiCap->MinorVersion == 0))
+    {
+        NvRmPrivSpiSlinkInitSlinkInterface_v1_0(&s_SlinkHwInterface);
+    }
+    else // 1.1
+    {
+        NvRmPrivSpiSlinkInitSlinkInterface_v1_1(&s_SlinkHwInterface);
+    }
+}
+
+/**
+ * Initialize the spi info structure.
+ * Thread safety: Caller responsibity.
+ */
+NvError NvRmPrivSpiSlinkInit(NvRmDeviceHandle hDevice)
+{
+ NvError e;
+ NvU32 Index;
+
+ NV_ASSERT(NvRmModuleGetNumInstances(hDevice, NvRmModuleID_Spi) <= MAX_SPI_CONTROLLERS);
+ NV_ASSERT(NvRmModuleGetNumInstances(hDevice, NvRmModuleID_Slink) <= MAX_SLINK_CONTROLLERS);
+
+ NvRmPrivSpiSlinkInitSpiInterface(&s_SpiHwInterface);
+ InitSlinkInterface(hDevice, &s_SlinkHwInterface);
+
+ // Initialize all the parameters.
+ s_SpiSlinkInfo.hDevice = hDevice;
+
+ for (Index = 0; Index < MAX_SPI_SLINK_INSTANCE; ++Index)
+ s_SpiSlinkInfo.hSpiSlinkChannelList[Index] = NULL;
+
+ // Create the mutex to access the spi information.
+ NV_CHECK_ERROR(NvOsMutexCreate(&s_SpiSlinkInfo.hChannelAccessMutex));
+ return NvSuccess;
+}
+
+/**
+ * Destroy all the spi structure information. It frees all the allocated
+ * resources: every open channel, then the channel-access mutex itself.
+ * Thread safety: Caller responsibility.
+ */
+void NvRmPrivSpiSlinkDeInit(void)
+{
+    NvU32 Index;
+
+    // Free all allocations.
+    NvOsMutexLock(s_SpiSlinkInfo.hChannelAccessMutex);
+    for (Index = 0; Index < MAX_SPI_SLINK_INSTANCE; ++Index)
+    {
+        if (s_SpiSlinkInfo.hSpiSlinkChannelList[Index] != NULL)
+        {
+            DestroySpiSlinkChannelHandle(s_SpiSlinkInfo.hSpiSlinkChannelList[Index]);
+            s_SpiSlinkInfo.hSpiSlinkChannelList[Index] = NULL;
+        }
+    }
+    NvOsMutexUnlock(s_SpiSlinkInfo.hChannelAccessMutex);
+
+    // No channel can exist past this point, so the mutex can go too.
+    NvOsMutexDestroy(s_SpiSlinkInfo.hChannelAccessMutex);
+    s_SpiSlinkInfo.hChannelAccessMutex = NULL;
+    s_SpiSlinkInfo.hDevice = NULL;
+}
+
+/**
+ * Open the handle for the spi.
+ *
+ * hRmDevice    - RM device handle.
+ * IoModule     - NvOdmIoModule_Sflash selects the SPI controller; any other
+ *                value selects the SLINK controller.
+ * InstanceId   - controller instance within the selected module.
+ * IsMasterMode - NV_TRUE for master mode. The SPI controller supports
+ *                master mode only.
+ * phRmSpi      - out: the opened handle; NULL on failure.
+ *
+ * Master-mode channels are shared between clients via a reference count;
+ * a slave-mode channel is exclusive, so a second open of it (or a
+ * master-mode open of an existing slave channel) returns
+ * NvError_AlreadyAllocated.
+ */
+NvError
+NvRmSpiOpen(
+    NvRmDeviceHandle hRmDevice,
+    NvU32 IoModule,
+    NvU32 InstanceId,
+    NvBool IsMasterMode,
+    NvRmSpiHandle * phRmSpi)
+{
+    NvError Error = NvSuccess;
+    NvRmSpiHandle hRmSpiSlink = NULL;
+    NvU32 ContInstanceId;
+    NvBool IsSpiChannel;
+
+    NV_ASSERT(phRmSpi);
+    NV_ASSERT(hRmDevice);
+
+    *phRmSpi = NULL;
+
+    IsSpiChannel = (IoModule == NvOdmIoModule_Sflash)? NV_TRUE: NV_FALSE;
+
+    // SPI controller does not support the slave mode
+    if ((IsSpiChannel) && (!IsMasterMode))
+        return NvError_NotSupported;
+
+    // 0 to (MAX_SPI_CONTROLLERS-1) will be the spi handles and then
+    // slink handles.
+    ContInstanceId = (IsSpiChannel)? InstanceId: (MAX_SPI_CONTROLLERS + InstanceId);
+
+    // Lock the spi info mutex access.
+    NvOsMutexLock(s_SpiSlinkInfo.hChannelAccessMutex);
+
+    if (s_SpiSlinkInfo.hSpiSlinkChannelList[ContInstanceId] == NULL)
+    {
+        // First open of this instance: create the channel.
+        Error = CreateSpiSlinkChannelHandle(hRmDevice, IsSpiChannel,
+            InstanceId, IsMasterMode, &hRmSpiSlink);
+        if (Error)
+            goto FuncExit;
+    }
+    else
+    {
+        // If the handle is not in master mode then not sharing across the
+        // client.
+        if (IsMasterMode)
+        {
+            hRmSpiSlink = s_SpiSlinkInfo.hSpiSlinkChannelList[ContInstanceId];
+            if (hRmSpiSlink->IsMasterMode)
+            {
+                // Shared master-mode channel: bump the reference count.
+                hRmSpiSlink->OpenCount++;
+            }
+            else
+            {
+                Error = NvError_AlreadyAllocated;
+                goto FuncExit;
+            }
+        }
+        else
+        {
+            // Slave-mode channels are exclusive.
+            Error = NvError_AlreadyAllocated;
+            goto FuncExit;
+        }
+    }
+    *phRmSpi = hRmSpiSlink;
+
+FuncExit:
+    NvOsMutexUnlock(s_SpiSlinkInfo.hChannelAccessMutex);
+    return Error;
+}
+
+/**
+ * Close the spi handle: drop one reference and destroy the channel when
+ * the last reference goes away. A NULL handle is ignored.
+ */
+void NvRmSpiClose(NvRmSpiHandle hRmSpi)
+{
+    if (!hRmSpi)
+        return;
+
+    NvOsMutexLock(s_SpiSlinkInfo.hChannelAccessMutex);
+    if (--hRmSpi->OpenCount == 0)
+        DestroySpiSlinkChannelHandle(hRmSpi);
+    NvOsMutexUnlock(s_SpiSlinkInfo.hChannelAccessMutex);
+}
+
+/**
+ * Perform a list of master-mode transfers on one chip select.
+ *
+ * Powers/clocks the controller, switches the pin mux when the controller is
+ * multiplexed (SpiPinMap != 0), asserts the chip select once, then runs each
+ * entry of t[] through either the cpu path (small transfers) or the apb dma
+ * path (large transfers). Dma channels are allocated lazily and released
+ * after MAX_DMA_HOLD_TIME consecutive cpu-only transactions. Entries with no
+ * buffer or zero length are skipped. Blocks until all transfers finish.
+ */
+void NvRmSpiMultipleTransactions(
+    NvRmSpiHandle hRmSpi,
+    NvU32 SpiPinMap,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvU32 PacketSizeInBits,
+    NvRmSpiTransactionInfo *t,
+    NvU32 NumOfTransactions)
+{
+    NvError Error = NvSuccess;
+    NvBool IsPackedMode;
+    NvU32 BytesPerPacket;
+    NvU32 PacketsTransferred;
+    NvU32 PacketsPerWord;
+    NvU32 TotalPacketsRequsted;
+    NvU32 TotalWordsRequested;
+    NvU32 i;
+    NvRmDmaModuleID DmaModuleId;
+
+    NV_ASSERT(hRmSpi);
+    NV_ASSERT((PacketSizeInBits > 0) && (PacketSizeInBits <= 32));
+    NV_ASSERT(hRmSpi->IsMasterMode);
+
+    // Chip select should be supported by the odm.
+    NV_ASSERT(hRmSpi->IsChipSelSupported[ChipSelectId]);
+
+    // Proper spi pin map if it is multiplexed otherwise 0.
+    NV_ASSERT(((SpiPinMap) && (hRmSpi->SpiPinMap == NvOdmSpiPinMap_Multiplexed)) ||
+        ((!SpiPinMap) && (hRmSpi->SpiPinMap != NvOdmSpiPinMap_Multiplexed)));
+
+    // Select Packed mode for the 8/16 bit length: a 32-bit fifo word then
+    // carries 4 (8-bit) or 2 (16-bit) packets.
+    BytesPerPacket = (PacketSizeInBits + 7)/8;
+    IsPackedMode = ((PacketSizeInBits == 8) || ((PacketSizeInBits == 16)));
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPacket: 1;
+
+    // Lock the channel access by other client till this client finishes the ops
+    NvOsMutexLock(hRmSpi->hChannelAccessMutex);
+    // Enable Power/Clock.
+    Error = SetPowerControl(hRmSpi, NV_TRUE);
+    if (Error != NvSuccess)
+        goto cleanup;
+
+    hRmSpi->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+    if (SpiPinMap)
+    {
+        // Multiplexed controller: route the pins to this client's pin map
+        // and un-tristate them for the duration of the transaction.
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap);
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_FALSE);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_FALSE);
+    }
+
+    if (!Error)
+        Error = SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_TRUE);
+    if (Error)
+        goto cleanup;
+
+    hRmSpi->hHwInterface->HwSetPacketLengthFxn(&hRmSpi->HwRegs,
+        PacketSizeInBits, IsPackedMode);
+
+    for (i=0; i< NumOfTransactions; i++, t++)
+    {
+        // Skip entries that have no buffer or a zero length.
+        if (!((t->rxBuffer || t->txBuffer) && t->len))
+            continue;
+
+        hRmSpi->CurrTransInfo.pRxBuff = NULL;
+        hRmSpi->CurrTransInfo.RxPacketsRemaining = 0;
+        hRmSpi->CurrTransInfo.pTxBuff = NULL;
+        hRmSpi->CurrTransInfo.TxPacketsRemaining = 0;
+
+        /* If not packed mode, packet == word */
+        TotalPacketsRequsted = t->len/BytesPerPacket;
+        TotalWordsRequested = (TotalPacketsRequsted + PacketsPerWord -1)/PacketsPerWord;
+        NV_ASSERT((t->len % BytesPerPacket) == 0);
+        NV_ASSERT(TotalPacketsRequsted);
+
+        // Allocate the dma here if transaction size is more than cpu based
+        // transaction threshold.
+        if ((TotalWordsRequested > hRmSpi->HwRegs.MaxWordTransfer) &&
+            (hRmSpi->DmaBufferSize) &&
+            (!hRmSpi->IsApbDmaAllocated))
+        {
+            hRmSpi->TransCountFromLastDmaUsage = 0;
+            hRmSpi->IsApbDmaAllocated = NV_TRUE;
+            DmaModuleId = (hRmSpi->IsSpiChannel)?NvRmDmaModuleID_Spi: NvRmDmaModuleID_Slink;
+            Error = NvRmDmaAllocate(hRmSpi->hDevice, &hRmSpi->hRmRxDma,
+                NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+                hRmSpi->InstanceId);
+            if (!Error)
+            {
+                Error = NvRmDmaAllocate(hRmSpi->hDevice, &hRmSpi->hRmTxDma,
+                    NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+                    hRmSpi->InstanceId);
+                if (Error)
+                    NvRmDmaFree(hRmSpi->hRmRxDma);
+            }
+            // Dma allocation failure is non-fatal: fall back to the cpu path.
+            if (Error)
+            {
+                hRmSpi->hRmRxDma = NULL;
+                hRmSpi->hRmTxDma = NULL;
+                hRmSpi->IsApbDmaAllocated = NV_FALSE;
+                Error = NvSuccess;
+            }
+        }
+
+        hRmSpi->CurrentDirection = SerialHwDataFlow_None;
+        if (t->txBuffer)
+            hRmSpi->CurrentDirection |= SerialHwDataFlow_Tx;
+        if (t->rxBuffer)
+            hRmSpi->CurrentDirection |= SerialHwDataFlow_Rx;
+        hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+            hRmSpi->CurrentDirection, NV_TRUE);
+
+        if ((!hRmSpi->IsApbDmaAllocated) ||
+            (TotalWordsRequested <= hRmSpi->HwRegs.MaxWordTransfer))
+        {
+            hRmSpi->TransCountFromLastDmaUsage++;
+            Error = MasterModeReadWriteCpu(hRmSpi, t->rxBuffer, t->txBuffer,
+                TotalPacketsRequsted, &PacketsTransferred,
+                IsPackedMode, PacketSizeInBits);
+        }
+        else
+        {
+            hRmSpi->TransCountFromLastDmaUsage = 0;
+            Error = MasterModeReadWriteDma(hRmSpi, t->rxBuffer, t->txBuffer,
+                TotalPacketsRequsted, &PacketsTransferred,
+                IsPackedMode, PacketSizeInBits);
+        }
+        hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+            hRmSpi->CurrentDirection, NV_FALSE);
+    }
+    hRmSpi->CurrentDirection = SerialHwDataFlow_None;
+    (void)SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_FALSE);
+
+cleanup:
+
+    // Re-tristate multi-plexed controllers, and re-multiplex the controller.
+    if (SpiPinMap)
+    {
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_TRUE);
+
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, hRmSpi->SpiPinMap);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_TRUE);
+    }
+    // Release the dma channels when they have gone unused for too many
+    // consecutive cpu-only transactions.
+    if ((hRmSpi->IsApbDmaAllocated) &&
+        (hRmSpi->TransCountFromLastDmaUsage > MAX_DMA_HOLD_TIME))
+    {
+        NvRmDmaFree(hRmSpi->hRmRxDma);
+        NvRmDmaFree(hRmSpi->hRmTxDma);
+        hRmSpi->hRmRxDma = NULL;
+        hRmSpi->hRmTxDma = NULL;
+        hRmSpi->IsApbDmaAllocated = NV_FALSE;
+    }
+
+    SetPowerControl(hRmSpi, NV_FALSE);
+    NvOsMutexUnlock(hRmSpi->hChannelAccessMutex);
+    NV_ASSERT(Error == NvSuccess);
+}
+
+/*
+ * EXPERIMENTAL: A hack has been added so as to improve the performance of ethernet.
+ * It is still experimental and will be done in a clean way once it is stable.
+ * This is an optimized version of function NvRmSpiMultipleTransactions.
+ * A separate function has been added instead of modifying an existing one since
+ * that would break the compatibility with other builds.
+ * Now it will power on the spi controller only in the beginning rather than doing
+ * it on every packet transfer.
+ *
+ * Unlike NvRmSpiMultipleTransactions, this variant never allocates dma
+ * lazily; the dma path is used only if a channel is already allocated on
+ * the handle.
+ */
+
+void NvRmSpiOptimizedMultipleTransactions(
+    NvRmSpiHandle hRmSpi,
+    NvU32 SpiPinMap,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvU32 PacketSizeInBits,
+    NvRmSpiTransactionInfo *t,
+    NvU32 NumOfTransactions)
+{
+    NvError Error = NvSuccess;
+    NvBool IsPackedMode;
+    NvU32 BytesPerPacket;
+    NvU32 PacketsTransferred;
+    NvU32 PacketsPerWord;
+    NvU32 TotalPacketsRequsted;
+    NvU32 TotalWordsRequested;
+    NvU32 i;
+
+    NV_ASSERT(hRmSpi);
+    NV_ASSERT((PacketSizeInBits > 0) && (PacketSizeInBits <= 32));
+    NV_ASSERT(hRmSpi->IsMasterMode);
+
+    // Chip select should be supported by the odm.
+    NV_ASSERT(hRmSpi->IsChipSelSupported[ChipSelectId]);
+
+    // Proper spi pin map if it is multiplexed otherwise 0.
+    NV_ASSERT(((SpiPinMap) && (hRmSpi->SpiPinMap == NvOdmSpiPinMap_Multiplexed)) ||
+        ((!SpiPinMap) && (hRmSpi->SpiPinMap != NvOdmSpiPinMap_Multiplexed)));
+
+    // Select Packed mode for the 8/16 bit length.
+    BytesPerPacket = (PacketSizeInBits + 7)/8;
+    IsPackedMode = ((PacketSizeInBits == 8) || ((PacketSizeInBits == 16)));
+    // BUG FIX: this was (PacketSizeInBits >> 3), which yields 1 packet per
+    // word for 8-bit packets. A packed 32-bit fifo word holds
+    // 4/BytesPerPacket packets (4 for 8-bit, 2 for 16-bit), exactly as
+    // computed by NvRmSpiMultipleTransactions and NvRmSpiTransaction; the
+    // old value corrupted the packed-word layout and the cpu/dma
+    // threshold decision for 8-bit transfers.
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPacket: 1;
+
+    // Lock the channel access by other client till this client finishes the ops
+    NvOsMutexLock(hRmSpi->hChannelAccessMutex);
+
+    // Enable Power/Clock.
+    Error = SetPowerControl(hRmSpi, NV_TRUE);
+    if (Error != NvSuccess)
+        goto cleanup;
+
+    if (SpiPinMap)
+    {
+        // Multiplexed controller: route the pins to this client's pin map
+        // and un-tristate them for the duration of the transaction.
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap);
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_FALSE);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_FALSE);
+    }
+
+    hRmSpi->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+    if (!Error)
+        Error = SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_TRUE);
+    if (Error)
+        goto cleanup;
+
+    hRmSpi->hHwInterface->HwSetPacketLengthFxn(&hRmSpi->HwRegs,
+        PacketSizeInBits, IsPackedMode);
+
+    for (i=0; i< NumOfTransactions; i++, t++)
+    {
+        // Skip entries that have no buffer or a zero length.
+        if (!((t->rxBuffer || t->txBuffer) && t->len))
+            continue;
+
+        hRmSpi->CurrTransInfo.pRxBuff = NULL;
+        hRmSpi->CurrTransInfo.RxPacketsRemaining = 0;
+        hRmSpi->CurrTransInfo.pTxBuff = NULL;
+        hRmSpi->CurrTransInfo.TxPacketsRemaining = 0;
+
+        /* If not packed mode, packet == word */
+        TotalPacketsRequsted = t->len/BytesPerPacket;
+        TotalWordsRequested = (TotalPacketsRequsted + PacketsPerWord -1)/PacketsPerWord;
+        NV_ASSERT((t->len % BytesPerPacket) == 0);
+        NV_ASSERT(TotalPacketsRequsted);
+
+        hRmSpi->CurrentDirection = SerialHwDataFlow_None;
+        if (t->txBuffer)
+            hRmSpi->CurrentDirection |= SerialHwDataFlow_Tx;
+        if (t->rxBuffer)
+            hRmSpi->CurrentDirection |= SerialHwDataFlow_Rx;
+        hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+            hRmSpi->CurrentDirection, NV_TRUE);
+
+        // Cpu path for small transfers or when no dma channel is allocated.
+        if ((!hRmSpi->IsApbDmaAllocated) ||
+            (TotalWordsRequested <= hRmSpi->HwRegs.MaxWordTransfer))
+        {
+            Error = MasterModeReadWriteCpu(hRmSpi, t->rxBuffer, t->txBuffer,
+                TotalPacketsRequsted, &PacketsTransferred,
+                IsPackedMode, PacketSizeInBits);
+        }
+        else
+        {
+            Error = MasterModeReadWriteDma(hRmSpi, t->rxBuffer, t->txBuffer,
+                TotalPacketsRequsted, &PacketsTransferred,
+                IsPackedMode, PacketSizeInBits);
+        }
+        hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+            hRmSpi->CurrentDirection, NV_FALSE);
+    }
+    hRmSpi->CurrentDirection = SerialHwDataFlow_None;
+    (void)SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_FALSE);
+
+cleanup:
+    // Re-tristate multi-plexed controllers, and re-multiplex the controller.
+    if (SpiPinMap)
+    {
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_TRUE);
+
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, hRmSpi->SpiPinMap);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_TRUE);
+    }
+    SetPowerControl(hRmSpi, NV_FALSE);
+    NvOsMutexUnlock(hRmSpi->hChannelAccessMutex);
+    NV_ASSERT(Error == NvSuccess);
+}
+/**
+ * Perform the data transfer.
+ *
+ * Master-mode, blocking, full or half duplex transfer of BytesRequested
+ * bytes on the given chip select. pReadBuffer and/or pWriteBuffer may be
+ * NULL for a one-directional transfer (at least one must be non-NULL).
+ * Small transfers go through the cpu fifo path; large ones through apb dma
+ * channels that are allocated lazily and released after MAX_DMA_HOLD_TIME
+ * consecutive cpu-only transfers.
+ */
+void NvRmSpiTransaction(
+    NvRmSpiHandle hRmSpi,
+    NvU32 SpiPinMap,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvU8 *pReadBuffer,
+    NvU8 *pWriteBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketSizeInBits)
+{
+    NvError Error = NvSuccess;
+    NvBool IsPackedMode;
+    NvU32 BytesPerPackets;
+    NvU32 PacketsTransferred;
+    NvU32 PacketsPerWord;
+    NvU32 TotalPacketsRequsted;
+    NvU32 TotalWordsRequested;
+    NvRmDmaModuleID DmaModuleId;
+
+    NV_ASSERT(hRmSpi);
+    NV_ASSERT(pReadBuffer || pWriteBuffer);
+
+    // Packet size should be 1 to 32..
+    NV_ASSERT((PacketSizeInBits > 0) && (PacketSizeInBits <= 32));
+
+    NV_ASSERT(hRmSpi->IsMasterMode);
+
+    // Bytes requested should be a multiple of bytes per packets.
+    BytesPerPackets = (PacketSizeInBits + 7)/8;
+    TotalPacketsRequsted = BytesRequested/BytesPerPackets;
+    NV_ASSERT((BytesRequested % BytesPerPackets) == 0);
+    NV_ASSERT(TotalPacketsRequsted);
+
+    // Chip select should be supported by the odm.
+    NV_ASSERT(hRmSpi->IsChipSelSupported[ChipSelectId]);
+
+    // Proper spi pin map if it is multiplexed otherwise 0.
+    NV_ASSERT(((SpiPinMap) && (hRmSpi->SpiPinMap == NvOdmSpiPinMap_Multiplexed)) ||
+        ((!SpiPinMap) && (hRmSpi->SpiPinMap != NvOdmSpiPinMap_Multiplexed)));
+
+    // Select Packed mode for the 8/16 bit length.
+    IsPackedMode = ((PacketSizeInBits == 8) || ((PacketSizeInBits == 16)));
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPackets: 1;
+    TotalWordsRequested = (TotalPacketsRequsted + PacketsPerWord -1)/PacketsPerWord;
+
+#if !NV_OAL
+    // Lock the channel access by other client till this client finishes the ops
+    NvOsMutexLock(hRmSpi->hChannelAccessMutex);
+    // Enable Power/Clock.
+    Error = SetPowerControl(hRmSpi, NV_TRUE);
+    if (Error != NvSuccess)
+        goto cleanup;
+#endif
+    hRmSpi->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+
+    // Enable the transmit if the Tx buffer is supplied.
+    hRmSpi->CurrentDirection = (pWriteBuffer)?SerialHwDataFlow_Tx: SerialHwDataFlow_None;
+
+    // Enable the receive if the Rx buffer is supplied.
+    if (pReadBuffer)
+        hRmSpi->CurrentDirection |= SerialHwDataFlow_Rx;
+
+    if (SpiPinMap)
+    {
+        // Multiplexed controller: route the pins to this client's pin map
+        // and un-tristate them for the duration of the transfer.
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap);
+
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_FALSE);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_FALSE);
+    }
+
+    hRmSpi->CurrTransInfo.pRxBuff = NULL;
+    hRmSpi->CurrTransInfo.RxPacketsRemaining = 0;
+    hRmSpi->CurrTransInfo.pTxBuff = NULL;
+    hRmSpi->CurrTransInfo.TxPacketsRemaining = 0;
+
+    // NOTE: recomputes the same value as above; kept for clarity.
+    TotalWordsRequested = (TotalPacketsRequsted + PacketsPerWord -1)/PacketsPerWord;
+
+    // Allocate the dma here if transaction size is more than cpu based
+    // transaction threshold.
+    if ((TotalWordsRequested > hRmSpi->HwRegs.MaxWordTransfer) &&
+        (hRmSpi->DmaBufferSize) &&
+        (!hRmSpi->IsApbDmaAllocated))
+    {
+        hRmSpi->TransCountFromLastDmaUsage = 0;
+        hRmSpi->IsApbDmaAllocated = NV_TRUE;
+        DmaModuleId = (hRmSpi->IsSpiChannel)?NvRmDmaModuleID_Spi: NvRmDmaModuleID_Slink;
+        Error = NvRmDmaAllocate(hRmSpi->hDevice, &hRmSpi->hRmRxDma,
+            NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+            hRmSpi->InstanceId);
+        if (!Error)
+        {
+            Error = NvRmDmaAllocate(hRmSpi->hDevice, &hRmSpi->hRmTxDma,
+                NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+                hRmSpi->InstanceId);
+            if (Error)
+                NvRmDmaFree(hRmSpi->hRmRxDma);
+        }
+        // Dma allocation failure is non-fatal: fall back to the cpu path.
+        if (Error)
+        {
+            hRmSpi->hRmRxDma = NULL;
+            hRmSpi->hRmTxDma = NULL;
+            hRmSpi->IsApbDmaAllocated = NV_FALSE;
+            Error = NvSuccess;
+        }
+    }
+    Error = SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_TRUE);
+    if (Error)
+        goto cleanup;
+
+    hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+        hRmSpi->CurrentDirection, NV_TRUE);
+
+    hRmSpi->hHwInterface->HwSetPacketLengthFxn(&hRmSpi->HwRegs,
+        PacketSizeInBits, IsPackedMode);
+
+    // Use cpu for less number of the data transfer.
+    if ((!hRmSpi->IsApbDmaAllocated) ||
+        (TotalWordsRequested <= hRmSpi->HwRegs.MaxWordTransfer))
+    {
+        hRmSpi->TransCountFromLastDmaUsage++;
+        Error = MasterModeReadWriteCpu(hRmSpi, pReadBuffer, pWriteBuffer,
+            TotalPacketsRequsted, &PacketsTransferred,
+            IsPackedMode, PacketSizeInBits);
+    }
+    else
+    {
+        hRmSpi->TransCountFromLastDmaUsage = 0;
+        Error = MasterModeReadWriteDma(hRmSpi, pReadBuffer, pWriteBuffer,
+            TotalPacketsRequsted, &PacketsTransferred,
+            IsPackedMode, PacketSizeInBits);
+    }
+
+    hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+        hRmSpi->CurrentDirection, NV_FALSE);
+    hRmSpi->CurrentDirection = SerialHwDataFlow_None;
+    (void)SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_FALSE);
+
+cleanup:
+
+    // Re-tristate multi-plexed controllers, and re-multiplex the controller.
+    if (SpiPinMap)
+    {
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, SpiPinMap, NV_TRUE);
+
+        NvRmPinMuxConfigSelect(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, hRmSpi->SpiPinMap);
+    }
+    else
+    {
+        if (hRmSpi->IsIdleSignalTristate)
+            NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+                hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_TRUE);
+    }
+
+    // Release the dma channels when they have gone unused for too many
+    // consecutive cpu-only transfers.
+    if ((hRmSpi->IsApbDmaAllocated) &&
+        (hRmSpi->TransCountFromLastDmaUsage > MAX_DMA_HOLD_TIME))
+    {
+        NvRmDmaFree(hRmSpi->hRmRxDma);
+        NvRmDmaFree(hRmSpi->hRmTxDma);
+        hRmSpi->hRmRxDma = NULL;
+        hRmSpi->hRmTxDma = NULL;
+        hRmSpi->IsApbDmaAllocated = NV_FALSE;
+    }
+
+#if !NV_OAL
+    SetPowerControl(hRmSpi, NV_FALSE);
+    NvOsMutexUnlock(hRmSpi->hChannelAccessMutex);
+#endif
+
+    NV_ASSERT(Error == NvSuccess);
+
+}
+
+/**
+ * Start the data transfer in slave mode.
+ *
+ * Queues a slave-mode transfer and returns without waiting for completion.
+ * On success the channel mutex remains LOCKED and the controller remains
+ * powered: the client must call NvRmSpiGetTransactionData() to collect the
+ * result, which also releases the mutex and powers down. On failure all
+ * state is unwound here before returning.
+ */
+NvError NvRmSpiStartTransaction(
+    NvRmSpiHandle hRmSpi,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvBool IsReadTransfer,
+    NvU8 *pWriteBuffer,
+    NvU32 BytesRequested,
+    NvU32 PacketSizeInBits)
+{
+    NvError Error = NvSuccess;
+    NvBool IsPackedMode;
+    NvU32 BytesPerPackets;
+    NvU32 TotalWordsRequested;
+    NvU32 PacketsPerWord;
+    NvU32 TotalPacketsRequsted;
+
+    NV_ASSERT(hRmSpi);
+    NV_ASSERT((IsReadTransfer) || (pWriteBuffer));
+
+    // Packet size should be 1 to 32..
+    NV_ASSERT((PacketSizeInBits > 0) && (PacketSizeInBits <= 32));
+
+    // Transfer is allowed for the slave mode only from this API.
+    NV_ASSERT(!hRmSpi->IsMasterMode);
+
+    BytesPerPackets = (PacketSizeInBits + 7)/8;
+
+    // Packets should be byte aligned.
+    NV_ASSERT((BytesRequested % BytesPerPackets) == 0);
+
+    // Slave mode will take the configuration from the Chip select 0.
+    NV_ASSERT(hRmSpi->IsChipSelSupported[ChipSelectId]);
+
+    TotalPacketsRequsted = BytesRequested/BytesPerPackets;
+
+    // Select Packed mode for the 8/16 bit length.
+    // If non-word-aligned packed mode is not supported then check for the
+    // word-aligned packets also.
+    if (hRmSpi->HwRegs.IsNonWordAlignedPackModeSupported)
+    {
+        IsPackedMode = ((PacketSizeInBits == 8) ||(PacketSizeInBits == 16));
+    }
+    else
+    {
+        IsPackedMode = (((PacketSizeInBits == 8) && (!(TotalPacketsRequsted & 0x3))) ||
+            ((PacketSizeInBits == 16) && (!(TotalPacketsRequsted & 0x1))));
+    }
+    PacketsPerWord = (IsPackedMode)? 4/BytesPerPackets: 1;
+    hRmSpi->CurrTransInfo.PacketsPerWord = PacketsPerWord;
+
+    TotalWordsRequested = (TotalPacketsRequsted + PacketsPerWord -1)/PacketsPerWord;
+
+    // Total word transfer should be maximum of 16KW (64KB): Hw Dma constraints
+    NV_ASSERT(TotalWordsRequested <= MAXIMUM_SLAVE_TRANSFER_WORD);
+
+    // Packet requested should not be more than 64KB: Slink controller constraints
+    NV_ASSERT(TotalPacketsRequsted <= (1 << 16));
+
+    // If total transfer word is more than 64KB (dma max transfer) or
+    // number of packet requested is more than 64K (slink max packet transfer)
+    // then return the error as NotSupported.
+    if ((TotalWordsRequested > MAXIMUM_SLAVE_TRANSFER_WORD) ||
+        (TotalPacketsRequsted > (1 << 16)))
+        return NvError_NotSupported;
+
+
+    // Lock the channel access. On success this stays locked until
+    // NvRmSpiGetTransactionData() unlocks it.
+    NvOsMutexLock(hRmSpi->hChannelAccessMutex);
+
+    if (hRmSpi->IsIdleSignalTristate)
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_FALSE);
+
+    // Enable Power/Clock.
+    Error = SetPowerControl(hRmSpi, NV_TRUE);
+
+    if (!Error)
+        Error = SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_TRUE);
+
+    if (Error)
+        goto cleanup;
+
+    hRmSpi->CurrentDirection = (IsReadTransfer)?SerialHwDataFlow_Rx : SerialHwDataFlow_None;
+    if (pWriteBuffer)
+        hRmSpi->CurrentDirection |= SerialHwDataFlow_Tx;
+
+    // Set the data direction
+    hRmSpi->hHwInterface->HwSetDataFlowFxn(&hRmSpi->HwRegs,
+        hRmSpi->CurrentDirection, NV_TRUE);
+
+    // Use only interrupt mode for transfer
+    hRmSpi->hHwInterface->HwSetInterruptSourceFxn(&hRmSpi->HwRegs,
+        hRmSpi->CurrentDirection, NV_TRUE);
+
+    hRmSpi->hHwInterface->HwSetPacketLengthFxn(&hRmSpi->HwRegs,
+        PacketSizeInBits, IsPackedMode);
+
+    // Use cpu if the dma is not allocated or the transfer size is less than
+    // one fifo depth
+    if ((!hRmSpi->IsApbDmaAllocated) ||
+        (TotalWordsRequested <= hRmSpi->HwRegs.MaxWordTransfer))
+    {
+        // Non dma mode: The maximum word transfer is the fifo depth.
+        // The word requested should be less than the maximum one transaction.
+        // We can not split the slave transaction in multiple small transactions
+        NV_ASSERT(TotalWordsRequested <= hRmSpi->HwRegs.MaxWordTransfer);
+
+        Error = SlaveModeSpiStartReadWriteCpu(hRmSpi, IsReadTransfer, pWriteBuffer,
+            TotalPacketsRequsted, IsPackedMode, PacketSizeInBits);
+    }
+    else
+    {
+        Error = SlaveModeSpiStartReadWriteDma(hRmSpi, IsReadTransfer, pWriteBuffer,
+            TotalPacketsRequsted, IsPackedMode, PacketSizeInBits);
+    }
+
+    // Success: return with the mutex held and power on; the transfer is
+    // completed and resources released in NvRmSpiGetTransactionData().
+    if (!Error)
+        return Error;
+cleanup:
+
+    (void)SetChipSelectSignalLevel(hRmSpi, ChipSelectId, ClockSpeedInKHz, NV_FALSE);
+
+    if (hRmSpi->IsIdleSignalTristate)
+        NvRmPinMuxConfigSetTristate(hRmSpi->hDevice,hRmSpi->RmIoModuleId,
+            hRmSpi->InstanceId, hRmSpi->SpiPinMap, NV_TRUE);
+    SetPowerControl(hRmSpi, NV_FALSE);
+    NvOsMutexUnlock(hRmSpi->hChannelAccessMutex);
+    return Error;
+}
+
+/**
+ * Complete a slave-mode transfer started by NvRmSpiStartTransaction().
+ *
+ * Waits (up to WaitTimeout) for the transfer, copies the received data into
+ * pReadBuffer (if the transfer included a read), then drops the chip
+ * select, restores tristate, powers the controller down and releases the
+ * channel mutex taken by NvRmSpiStartTransaction().
+ *
+ * Returns NvError_InvalidState if no transfer is in progress, otherwise
+ * the completion status; *pBytesTransfererd receives the byte count.
+ */
+NvError
+NvRmSpiGetTransactionData(
+    NvRmSpiHandle hRmSpiSlink,
+    NvU8 *pReadBuffer,
+    NvU32 BytesRequested,
+    NvU32 *pBytesTransfererd,
+    NvU32 WaitTimeout)
+{
+    NvError Error = NvSuccess;
+
+    NV_ASSERT(pBytesTransfererd);
+    // A read buffer must be supplied iff the pending transfer has an Rx leg.
+    NV_ASSERT((pReadBuffer && (hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx )) ||
+        ((!pReadBuffer) && (!(hRmSpiSlink->CurrentDirection & SerialHwDataFlow_Rx))));
+
+    if (hRmSpiSlink->CurrentDirection == SerialHwDataFlow_None)
+        return NvError_InvalidState;
+
+    Error = SlaveModeSpiCompleteReadWrite(hRmSpiSlink, pReadBuffer,
+        pBytesTransfererd, WaitTimeout);
+
+    (void)SetChipSelectSignalLevel(hRmSpiSlink, hRmSpiSlink->CurrTransferChipSelId,
+        0, NV_FALSE);
+
+    if (hRmSpiSlink->IsIdleSignalTristate)
+        NvRmPinMuxConfigSetTristate(hRmSpiSlink->hDevice,hRmSpiSlink->RmIoModuleId,
+            hRmSpiSlink->InstanceId, hRmSpiSlink->SpiPinMap, NV_TRUE);
+
+    // Disable Power/Clock.
+    SetPowerControl(hRmSpiSlink, NV_FALSE);
+    // Releases the lock acquired in NvRmSpiStartTransaction().
+    NvOsMutexUnlock(hRmSpiSlink->hChannelAccessMutex);
+    return Error;
+}
+
+/**
+ * Record the spi signal (clock polarity/phase) mode to use for the given
+ * chip select on subsequent transfers. Silently ignored when the chip
+ * select is not supported by the odm.
+ */
+void
+NvRmSpiSetSignalMode(
+    NvRmSpiHandle hRmSpi,
+    NvU32 ChipSelectId,
+    NvU32 SpiSignalMode)
+{
+    NV_ASSERT(hRmSpi);
+    if (!hRmSpi->IsChipSelSupported[ChipSelectId])
+        return;
+
+    NvOsMutexLock(hRmSpi->hChannelAccessMutex);
+    hRmSpi->DeviceInfo[ChipSelectId].SignalMode =
+        (NvOdmQuerySpiSignalMode)SpiSignalMode;
+    NvOsMutexUnlock(hRmSpi->hChannelAccessMutex);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.h b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.h
new file mode 100644
index 000000000000..3147c58ed707
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_RM_SPI_SLINK_H
+#define INCLUDED_RM_SPI_SLINK_H
+
+#include "nvrm_spi.h"
+
+// Describes one transfer in a multi-transaction request. Either buffer may
+// be NULL for a one-directional transfer; entries with both buffers NULL or
+// len == 0 are skipped by the transaction APIs.
+typedef struct NvRmSpiTransactionInfoRec
+{
+    NvU8 *rxBuffer;  // receive buffer, or NULL
+    NvU8 *txBuffer;  // transmit buffer, or NULL
+    NvU32 len;       // transfer length in bytes (multiple of the packet size)
+} NvRmSpiTransactionInfo;
+
+
+// Perform NumOfTransactions master-mode transfers described by t[] on one
+// chip select; blocks until all complete.
+void NvRmSpiMultipleTransactions(
+    NvRmSpiHandle hRmSpi,
+    NvU32 SpiPinMap,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvU32 PacketSizeInBits,
+    NvRmSpiTransactionInfo *t,
+    NvU32 NumOfTransactions);
+
+// Experimental variant of NvRmSpiMultipleTransactions that powers the
+// controller once for the whole batch (see implementation notes).
+void NvRmSpiOptimizedMultipleTransactions(
+    NvRmSpiHandle hRmSpi,
+    NvU32 SpiPinMap,
+    NvU32 ChipSelectId,
+    NvU32 ClockSpeedInKHz,
+    NvU32 PacketSizeInBits,
+    NvRmSpiTransactionInfo *t,
+    NvU32 NumOfTransactions);
+
+
+#endif // INCLUDED_RM_SPI_SLINK_H
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink_hw_private.h b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink_hw_private.h
new file mode 100644
index 000000000000..29bec8628e0a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/rm_spi_slink_hw_private.h
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ * @brief <b>nVIDIA Driver Development Kit:
+ * Private functions for the spi Rm driver</b>
+ *
+ * @b Description: Defines the private interfacing functions for the spi
+ * hw interface.
+ *
+ */
+
+#ifndef INCLUDED_RMSPI_HW_PRIVATE_H
+#define INCLUDED_RMSPI_HW_PRIVATE_H
+
+/**
+ * @defgroup nvrm_spi Synchrnous Peripheral Interface(SPI) Controller hw
+ * interface API
+ *
+ * This is the synchrnous peripheral interface (SPI) hw interface controller api
+ * which communicate to the device/other processor using the spi protocols.
+ *
+ * @ingroup nvddk_modules
+ * @{
+ *
+ */
+
+#include "nvcommon.h"
+#include "nvrm_init.h"
+#include "nvodm_query.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Software-side record of the spi controller register values
+ * (command, status and dma control) kept between reconfigurations.
+ */
+typedef struct
+{
+    NvU32 Command;
+    NvU32 Status;
+    NvU32 DmaControl;
+} SpiHwRegisters;
+
+/**
+ * Software-side record of the slink controller register values.
+ * Slink has two command registers where spi has one.
+ */
+typedef struct
+{
+    NvU32 Command1;
+    NvU32 Command2;
+    NvU32 Status;
+    NvU32 DmaControl;
+} SlinkHwRegisters;
+
+/**
+ * Union of the spi and slink register-state records; which member is
+ * valid depends on which controller type the instance drives.
+ */
+typedef union
+{
+    SpiHwRegisters SpiRegs;
+    SlinkHwRegisters SlinkRegs;
+} SerialHwRegistersState;
+
+
+/**
+ * Per-instance description of a spi/slink controller: addresses,
+ * current transfer configuration and the shadowed register state.
+ */
+typedef struct
+{
+    // Serial channel Id.
+    NvU32 InstanceId;
+
+    // Virtual base address of the spi hw register.
+    NvU32 *pRegsBaseAdd;
+
+    // Physical address of the transmit fifo (presumably the apb-dma
+    // destination -- confirm against the dma setup code).
+    NvRmPhysAddr HwTxFifoAdd;
+
+    // Physical address of the receive fifo (presumably the apb-dma
+    // source -- confirm against the dma setup code).
+    NvRmPhysAddr HwRxFifoAdd;
+
+    // Size of the register bank.
+    NvU32 RegBankSize;
+
+    // NV_TRUE when packed mode is configured.
+    NvBool IsPackedMode;
+
+    // Currently configured packet length.
+    NvU32 PacketLength;
+
+    // Signal mode in effect for the current transfer.
+    NvOdmQuerySpiSignalMode CurrSignalMode;
+
+    // Signal mode restored when the bus is idle.
+    NvOdmQuerySpiSignalMode IdleSignalMode;
+
+    // Level driven on the data-out line while idle.
+    NvBool IsIdleDataOutHigh;
+
+    // NV_TRUE when bits are shifted lsb-first.
+    NvBool IsLsbFirst;
+
+    // Shadow of the controller registers (spi or slink view).
+    SerialHwRegistersState HwRegs;
+
+    // Largest transfer, in words, the controller handles in one go.
+    NvU32 MaxWordTransfer;
+    // NV_TRUE for master mode, NV_FALSE for slave mode.
+    NvBool IsMasterMode;
+
+    // Tells whether the non word aligned packet size is supported or not.
+    // If it is supported then we need not do any sw workaround; otherwise
+    // transfer the nearest word aligned packet using the packed mode and
+    // the remainder in non-packed format.
+    NvBool IsNonWordAlignedPackModeSupported;
+} SerialHwRegisters;
+
+/**
+ * Data flow direction flags. Rx and Tx are distinct bits so they can be
+ * combined for full-duplex transfers.
+ */
+typedef enum
+{
+    // No data transfer.
+    SerialHwDataFlow_None = 0x0,
+
+    // Receive data flow.
+    SerialHwDataFlow_Rx = 0x1,
+
+    // Transmit data flow.
+    SerialHwDataFlow_Tx = 0x2,
+
+    // Forces the enum to 32 bits for ABI stability.
+    SerialHwDataFlow_Force32 = 0x7FFFFFFF
+} SerialHwDataFlow;
+
+/**
+ * Interrupt reason flags reported by the spi/slink controller; distinct
+ * bits so several reasons can be reported at once.
+ */
+typedef enum
+{
+    // No Serial interrupt reason.
+    SerialHwIntReason_None = 0x0,
+
+    // Receive error Serial interrupt reason.
+    SerialHwIntReason_RxError = 0x1,
+
+    // Transmit Error spi interrupt reason.
+    SerialHwIntReason_TxError = 0x2,
+
+    // Transfer complete interrupt reason.
+    SerialHwIntReason_TransferComplete = 0x4,
+
+    // Forces the enum to 32 bits for ABI stability.
+    SerialHwIntReason_Force32 = 0x7FFFFFFF
+} SerialHwIntReason;
+
+/**
+ * Fifo selector; Rx and Tx are distinct bits and `Both` is their union.
+ */
+typedef enum
+{
+    // Receive fifo type.
+    SerialHwFifo_Rx = 0x1,
+
+    // Transmit fifo type.
+    SerialHwFifo_Tx = 0x2,
+
+    // Both Rx and Tx fifo
+    SerialHwFifo_Both = 0x3,
+
+    // Forces the enum to 32 bits for ABI stability.
+    SerialHwFifo_Force32 = 0x7FFFFFFF
+
+} SerialHwFifo;
+
+
+// Virtual-function table abstracting the spi vs. slink register layouts:
+// one NvRmPrivSpiSlinkInit*Interface() routine below fills this in per
+// controller flavor/version, and the common driver calls only through it.
+typedef struct
+{
+    /**
+     * Initialize the spi register.
+     */
+    void (* HwRegisterInitializeFxn)(NvU32 SerialChannelId, SerialHwRegisters *pHwRegs);
+
+    /**
+     * Initialize the spi controller.
+     */
+    void (* HwControllerInitializeFxn)(SerialHwRegisters *pHwRegs);
+
+    /**
+     * Set the functional mode whether this is the master or slave mode.
+     */
+    void (* HwSetFunctionalModeFxn)(SerialHwRegisters *pHwRegs, NvBool IsMasterMode);
+
+    /**
+     * Set the signal mode of communication whether this is the mode 0, 1, 2 or 3.
+     */
+    void (* HwSetSignalModeFxn)(SerialHwRegisters *pHwRegs, NvOdmQuerySpiSignalMode SignalMode);
+
+    /**
+     * Reset the fifo
+     */
+    void (* HwResetFifoFxn)(SerialHwRegisters *pHwRegs, SerialHwFifo FifoType);
+
+    /**
+     * Find out whether transmit fifo is full or not.
+     * NOTE(review): only member without the "Fxn" suffix -- renaming would
+     * touch callers outside this header, so it is left as-is.
+     */
+    NvBool (* HwIsTransmitFifoFull)(SerialHwRegisters *pHwRegs);
+
+    /**
+     * Set the transfer order whether the bit will start from the lsb or from
+     * msb.
+     */
+    void (* HwSetTransferBitOrderFxn)(SerialHwRegisters *pHwRegs, NvBool IsLsbFirst);
+
+    /**
+     * Start the transfer of the communication.
+     */
+    void (* HwStartTransferFxn)(SerialHwRegisters *pHwRegs, NvBool IsReconfigure);
+
+    /**
+     * Enable/disable the data transfer flow.
+     */
+    void
+    (* HwSetDataFlowFxn)(
+        SerialHwRegisters *pHwRegs,
+        SerialHwDataFlow DataFlow,
+        NvBool IsEnable);
+
+    /**
+     * Set the chip select signal level to be default based on device during the
+     * initialization.
+     */
+    void
+    (* HwSetChipSelectDefaultLevelFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 ChipSelectId,
+        NvBool IsHigh);
+
+    /**
+     * Set the chip select signal level.
+     */
+    void
+    (* HwSetChipSelectLevelFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 ChipSelectId,
+        NvBool IsHigh);
+
+
+    /**
+     * Set the packet length.
+     */
+    void
+    (* HwSetPacketLengthFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 PacketLength,
+        NvBool IsPackedMode);
+
+    /**
+     * Set the Dma transfer size.
+     */
+    void
+    (* HwSetDmaTransferSizeFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 DmaBlockSize);
+
+    /**
+     * Get the transferred packet count.
+     */
+    NvU32 (* HwGetTransferdCountFxn)(SerialHwRegisters *pHwRegs);
+
+
+    /**
+     * Set the trigger level.
+     */
+    void
+    (* HwSetTriggerLevelFxn)(
+        SerialHwRegisters *pHwRegs,
+        SerialHwFifo FifoType,
+        NvU32 TriggerLevel);
+
+    /**
+     * Write into the transmit fifo register.
+     * returns the number of words written.
+     */
+    NvU32
+    (* HwWriteInTransmitFifoFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 *pTxBuff,
+        NvU32 WordRequested);
+
+    /**
+     * Read the data from the receive fifo.
+     * Returns the number of words it read.
+     */
+    NvU32
+    (* HwReadFromReceiveFifoFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 *pRxBuff,
+        NvU32 WordRequested);
+
+    /**
+     * Enable/disable the interrupt source.
+     */
+    void
+    (* HwSetInterruptSourceFxn)(
+        SerialHwRegisters *pHwRegs,
+        SerialHwDataFlow DataDirection,
+        NvBool IsEnable);
+
+    /**
+     * Get the transfer status.
+     */
+    NvError
+    (* HwGetTransferStatusFxn)(
+        SerialHwRegisters *pHwRegs,
+        SerialHwDataFlow DataDirection);
+
+    /**
+     * Clear the transfer status.
+     */
+    void
+    (* HwClearTransferStatusFxn)(
+        SerialHwRegisters *pHwRegs,
+        SerialHwDataFlow DataDirection);
+
+    /**
+     * Set the number of transfers the CS should stay low for transfer sizes
+     * of more than 32 words. This enables transfers of word sizes > 32
+     * without using apb-dma.
+     */
+    void
+    (* HwSetCSActiveForTotalWordsFxn)(
+        SerialHwRegisters *pHwRegs,
+        NvU32 TotalWords);
+
+    /**
+     * Check whether transfer is completed or not.
+     */
+    NvBool (* HwIsTransferCompletedFxn)( SerialHwRegisters *pHwRegs);
+} HwInterface, *HwInterfaceHandle;
+
+
+/**
+ * Initialize the spi interface for the hw access.
+ */
+void NvRmPrivSpiSlinkInitSpiInterface(HwInterface *pSpiInterface);
+
+/**
+ * Initialize the slink interface for the hw access which is common across
+ * the versions.
+ */
+void NvRmPrivSpiSlinkInitSlinkInterface(HwInterface *pSpiInterface);
+
+/**
+ * Initialize the slink interface of version 1.0 for the hw access.
+ */
+void NvRmPrivSpiSlinkInitSlinkInterface_v1_0(HwInterface *pSlinkInterface);
+
+
+/**
+ * Initialize the ap20 slink interface of version 1.1 for the hw access.
+ */
+void NvRmPrivSpiSlinkInitSlinkInterface_v1_1(HwInterface *pSlinkInterface);
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif // INCLUDED_RMSPI_HW_PRIVATE_H
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/Makefile b/arch/arm/mach-tegra/nvrm/io/ap20/Makefile
new file mode 100644
index 000000000000..6dc5ecb4151f
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/Makefile
@@ -0,0 +1,13 @@
+# AP20 NvRM I/O drivers. NV_IS_AVP/NV_OAL select the host-CPU, non-OAL
+# build of the shared NvRM sources.
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+# Propagate the kernel debug configuration into the NvRM NV_DEBUG switch.
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += ap20rm_i2c.o
+obj-y += ap20rm_pcie.o
+obj-y += ap20rm_slink_hw_private.o
+obj-y += ap20rm_owr.o
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_i2c.c b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_i2c.c
new file mode 100644
index 000000000000..0043b7410542
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_i2c.c
@@ -0,0 +1,1486 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: I2C API</b>
+ *
+ * @b Description: Contains the NvRM I2C implementation. for Ap20
+ */
+
+#include "nvrm_i2c.h"
+#include "nvrm_i2c_private.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "ap20/ari2c.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "ap20/ardvc.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+
+/* Register access macros. I2C_REGR/I2C_REGW translate the register offset
+ * at runtime: on the DVC controller the I2C block sits behind the DVC_
+ * register map, so the DVC_ offset is used instead of the I2C_ one.
+ * DVC_REGR/DVC_REGW always address the DVC register bank. */
+#define I2C_REGR(c, reg) NV_REGR((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+        (c)->I2cRegisterOffset + (((c)->ModuleId == NvRmModuleID_Dvc) ? DVC_##reg##_0 : \
+        I2C_##reg##_0))
+
+#define I2C_REGW(c, reg, val) \
+    do { \
+        NV_REGW((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+            ((c)->I2cRegisterOffset + (((c)->ModuleId == NvRmModuleID_Dvc) ? DVC_##reg##_0 : \
+            I2C_##reg##_0)), (val)); \
+    } while(0)
+
+#define DVC_REGR(c, reg) NV_REGR((c)->hRmDevice, NvRmModuleID_Dvc, (c)->Instance, \
+        DVC_##reg##_0)
+#define DVC_REGW(c, reg, val) \
+    do { \
+        NV_REGW((c)->hRmDevice, NvRmModuleID_Dvc, (c)->Instance, \
+            DVC_##reg##_0, val); \
+    } while(0)
+
+/* Register access macros that always use the I2C_ register offsets (no
+ * DVC offset translation), for registers reached at the I2C offsets even
+ * on the DVC controller (e.g. the slave-config registers). */
+#define I2C2_REGR(c, reg) NV_REGR((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+        (c)->I2cRegisterOffset + I2C_##reg##_0 )
+
+/* Fix: the do{...}while(0) wrapper previously ended with a trailing
+ * semicolon, which breaks the idiom -- `if (x) I2C2_REGW(...); else ...`
+ * expanded to a syntax error. The caller supplies the semicolon. */
+#define I2C2_REGW(c, reg, val) \
+    do { \
+        NV_REGW((c)->hRmDevice, (c)->ModuleId, (c)->Instance, \
+            ((c)->I2cRegisterOffset + I2C_##reg##_0), (val) ); \
+    } while(0)
+
+
+// Compile-time switches for the per-path debug trace macros below.
+#define DEBUG_SEND_PROCESS 0
+#define DEBUG_READ_PROCESS 0
+#define DEBUG_TRACE_PROCESS 0
+
+// Each DEBUG_I2C_* macro takes the printf arguments as one parenthesized
+// `Format` argument, expanded as `NvOsDebugPrintf Format;`.
+#if DEBUG_SEND_PROCESS
+#define DEBUG_I2C_SEND(Expr, Format) \
+    do { \
+        if (Expr) \
+        { \
+            NvOsDebugPrintf Format; \
+        } \
+    } while(0)
+#else
+#define DEBUG_I2C_SEND(Expr, Format)
+#endif
+
+#if DEBUG_READ_PROCESS
+#define DEBUG_I2C_READ(Expr, Format) \
+    do { \
+        if (Expr) \
+        { \
+            NvOsDebugPrintf Format; \
+        } \
+    } while(0)
+#else
+#define DEBUG_I2C_READ(Expr, Format)
+#endif
+
+#if DEBUG_TRACE_PROCESS
+#define DEBUG_I2C_TRACE(Expr, Format) \
+    do { \
+        if (Expr) \
+        { \
+            NvOsDebugPrintf Format; \
+        } \
+    } while(0)
+#else
+#define DEBUG_I2C_TRACE(Expr, Format)
+#endif
+
+// The maximum transfer size by one transaction.
+enum {MAX_I2C_ONE_TRANSACTION_SIZE = 0x1000}; // 4KB
+
+// The maximum request size for one transaction using the dma.
+// + 64 bytes for the packet header.
+enum {DEFAULT_I2C_DMA_BUFFER_SIZE = (MAX_I2C_ONE_TRANSACTION_SIZE + 0x40)}; // 4KB
+
+// The default request size for one transaction using the nondma mode.
+enum {DEFAULT_I2C_CPU_BUFFER_SIZE = MAX_I2C_ONE_TRANSACTION_SIZE};
+
+// Wait time to poll the status for completion.
+enum { I2C_POLLING_TIMEOUT_STEP_USEC = 50};
+
+// I2C fifo depth, in words.
+enum { I2C_FIFO_DEPTH = 8};
+
+// Dma/cpu selection threshold: transfers of up to this many words go
+// through the cpu path, larger ones through apb-dma.
+enum { I2C_MAX_WORD_TO_USE_CPU = 16};
+
+// Holding the apb dma for the continuous non dma transaction count
+enum {HOLDING_DMA_TRANSACTION_COUNT = 15};
+// Fifo-error status bits: tx fifo overflow, rx fifo underflow.
+#define I2C_FIFO_ERROR_INTERRUPTS (NV_DRF_DEF(I2C, INTERRUPT_STATUS_REGISTER, TFIFO_OVF, SET) | \
+        NV_DRF_DEF(I2C, INTERRUPT_STATUS_REGISTER, RFIFO_UNF, SET))
+
+// Drain any pending counts on a semaphore (zero-timeout waits until the
+// wait times out). No-op in OAL builds, which poll instead of waiting.
+#if NV_OAL
+#define RESET_SEMA_COUNT(hSema)
+#else
+#define RESET_SEMA_COUNT(hSema) \
+    while(NvOsSemaphoreWaitTimeout(hSema, 0) != NvError_Timeout)
+#endif
+
+// Convert a byte count to a word count, rounding up.
+#define BYTES_TO_WORD(ReqSize) (((ReqSize) + 3) >> 2)
+
+/**
+ * Create, allocate and pin an uncached dma buffer of `BufferSize` bytes.
+ *
+ * On success *phNewMemHandle holds the handle and *pNewMemAddr its pinned
+ * physical address; on failure *phNewMemHandle is NULL and the error is
+ * returned. Caller owns the handle (unpin + free to release).
+ */
+static NvError
+CreateDmaBufferMemoryHandle(
+    NvRmDeviceHandle hDevice,
+    NvRmMemHandle *phNewMemHandle,
+    NvRmPhysAddr *pNewMemAddr,
+    NvU32 BufferSize)
+{
+    NvError Error = NvSuccess;
+    NvRmMemHandle hNewMemHandle = NULL;
+    // Heaps tried in order of preference: carveout, then external
+    // sdram, then GART.
+    static const NvRmHeap HeapProperty[] =
+    {
+        NvRmHeap_ExternalCarveOut,
+        NvRmHeap_External,
+        NvRmHeap_GART,
+    };
+
+    // Initialize the memory handle with NULL
+    *phNewMemHandle = NULL;
+
+    // Create memory handle
+    Error = NvRmMemHandleCreate(hDevice, &hNewMemHandle, BufferSize);
+
+    // Allocate 4-byte-aligned, uncached memory from one of the heaps above.
+    if (!Error)
+        Error = NvRmMemAlloc(hNewMemHandle, HeapProperty,
+                    NV_ARRAY_SIZE(HeapProperty), 4, NvOsMemAttribute_Uncached);
+
+    // Pin the memory allocation so that it should not move by memory manager.
+    if (!Error)
+        *pNewMemAddr = NvRmMemPin(hNewMemHandle);
+
+    // If error then free the memory allocation and memory handle.
+    if (Error)
+    {
+        NvRmMemHandleFree(hNewMemHandle);
+        hNewMemHandle = NULL;
+    }
+
+    *phNewMemHandle = hNewMemHandle;
+    return Error;
+}
+
+/**
+ * Release a dma buffer created by CreateDmaBufferMemoryHandle():
+ * unpin, then free the handle. A NULL handle is silently ignored.
+ */
+static void DestroyDmaBufferMemoryHandle(NvRmMemHandle hMemHandle)
+{
+    // Accept NULL so callers can pass a handle that was never created.
+    if (!hMemHandle)
+        return;
+
+    // Unpin first -- the allocation was pinned at creation time.
+    NvRmMemUnpin(hMemHandle);
+    NvRmMemHandleFree(hMemHandle);
+}
+
+/**
+ * Allocate, pin and cpu-map a dma transfer buffer of `BufferSize` bytes.
+ *
+ * On success the handle, pinned physical address and cpu mapping are
+ * returned through the out parameters; on any failure all three are
+ * left NULL/0 and the error is returned.
+ */
+static NvError
+CreateDmaTransferBuffer(
+    NvRmDeviceHandle hRmDevice,
+    NvRmMemHandle *phRmMemory,
+    NvRmPhysAddr *pBuffPhysAddr,
+    void **pBuffPtr,
+    NvU32 BufferSize)
+{
+    NvError Error = NvSuccess;
+    NvRmMemHandle hRmMemory = NULL;
+    NvRmPhysAddr BuffPhysAddr;
+
+    // Reset all the members related to the dma buffer.
+    BuffPhysAddr = 0;
+
+    *phRmMemory = NULL;
+    *pBuffPtr = (void *)NULL;
+    *pBuffPhysAddr = 0;
+
+    // Create, allocate and pin the backing memory.
+    Error = CreateDmaBufferMemoryHandle(hRmDevice, &hRmMemory,
+                &BuffPhysAddr, BufferSize);
+    if (!Error)
+    {
+        // Map the whole buffer read/write for the cpu.
+        Error = NvRmMemMap(hRmMemory, 0, BufferSize,
+                    NVOS_MEM_READ_WRITE, pBuffPtr);
+        // If error then free the allocation and reset all changed values.
+        if (Error)
+        {
+            DestroyDmaBufferMemoryHandle(hRmMemory);
+            hRmMemory = NULL;
+            *pBuffPtr = (void *)NULL;
+            return Error;
+        }
+        *phRmMemory = hRmMemory;
+        *pBuffPhysAddr = BuffPhysAddr;
+    }
+    return Error;
+}
+
+/**
+ * Tear down a dma transfer buffer: drop the cpu mapping (when one
+ * exists) and release the pinned allocation. NULL handle is a no-op.
+ */
+static void
+DestroyDmaTransferBuffer(
+    NvRmMemHandle hRmMemory,
+    void *pBuffPtr,
+    NvU32 BufferSize)
+{
+    if (!hRmMemory)
+        return;
+
+    if (pBuffPtr)
+        NvRmMemUnmap(hRmMemory, pBuffPtr, BufferSize);
+
+    DestroyDmaBufferMemoryHandle(hRmMemory);
+}
+
+/**
+ * Program the transmit fifo trigger level. The request is clamped to the
+ * fifo depth; a zero level leaves the register untouched. The hw field is
+ * zero-based, hence the "- 1".
+ */
+static void SetTxFifoTriggerLevel(NvRmI2cControllerHandle hRmI2cCont, NvU32 TrigLevel)
+{
+    NvU32 RegValue;
+    NvU32 Level = NV_MIN(I2C_FIFO_DEPTH, TrigLevel);
+
+    if (Level == 0)
+        return;
+
+    RegValue = I2C_REGR(hRmI2cCont, FIFO_CONTROL);
+    RegValue = NV_FLD_SET_DRF_NUM(I2C, FIFO_CONTROL, TX_FIFO_TRIG,
+                    Level - 1, RegValue);
+    DEBUG_I2C_SEND(1, ("Tx Fifo Control 0x%08x\n", RegValue));
+    I2C_REGW(hRmI2cCont, FIFO_CONTROL, RegValue);
+}
+
+/**
+ * Program the receive fifo trigger level. The request is clamped to the
+ * fifo depth; a zero level leaves the register untouched. The hw field is
+ * zero-based, hence the "- 1".
+ */
+static void SetRxFifoTriggerLevel(NvRmI2cControllerHandle hRmI2cCont, NvU32 TrigLevel)
+{
+    NvU32 RegValue;
+    NvU32 Level = NV_MIN(I2C_FIFO_DEPTH, TrigLevel);
+
+    if (Level == 0)
+        return;
+
+    RegValue = I2C_REGR(hRmI2cCont, FIFO_CONTROL);
+    RegValue = NV_FLD_SET_DRF_NUM(I2C, FIFO_CONTROL, RX_FIFO_TRIG,
+                    Level - 1, RegValue);
+    DEBUG_I2C_READ(1, ("Rx Fifo Control 0x%08x\n", RegValue));
+    I2C_REGW(hRmI2cCont, FIFO_CONTROL, RegValue);
+}
+
+
+/**
+ * Flush the transmit fifo and busy-wait (10 us steps) until the hw
+ * clears the flush bit.
+ * NOTE(review): the poll loop has no timeout bound -- it relies on the
+ * hw always completing the flush.
+ */
+static void ResetTxFifo(NvRmI2cControllerHandle hRmI2cCont)
+{
+    NvU32 FifoControlReg;
+
+    // Request the flush via the self-clearing TX_FIFO_FLUSH bit.
+    FifoControlReg = I2C_REGR (hRmI2cCont, FIFO_CONTROL);
+    FifoControlReg = NV_FLD_SET_DRF_DEF(I2C, FIFO_CONTROL, TX_FIFO_FLUSH,
+                        SET, FifoControlReg);
+    I2C_REGW (hRmI2cCont, FIFO_CONTROL, FifoControlReg);
+    // Poll until the hw deasserts the flush bit.
+    do
+    {
+        NvOsWaitUS(10);
+        FifoControlReg = I2C_REGR (hRmI2cCont, FIFO_CONTROL);
+    }while(FifoControlReg & NV_DRF_DEF(I2C, FIFO_CONTROL, TX_FIFO_FLUSH, SET));
+}
+
+/**
+ * One-time DVC-controller setup: put the DVC's I2C block under software
+ * control, enable its done interrupt, and enable the master interrupt.
+ */
+static void DoDvcI2cControlInitialization(NvRmI2cControllerHandle hRmI2cCont)
+{
+    NvU32 RegVal = 0;
+
+    // Hand the I2C block to software and enable the I2C-done interrupt.
+    RegVal = DVC_REGR(hRmI2cCont, CTRL_REG3);
+    RegVal = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_HW_SW_PROG, SW, RegVal);
+    RegVal = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG3, I2C_DONE_INTR_EN, ENABLE, RegVal);
+    DVC_REGW(hRmI2cCont, CTRL_REG3, RegVal);
+
+    // Enable the DVC's top-level interrupt.
+    RegVal = DVC_REGR(hRmI2cCont, CTRL_REG1);
+    RegVal = NV_FLD_SET_DRF_DEF(DVC, CTRL_REG1, INTR_EN, ENABLE, RegVal);
+    DVC_REGW(hRmI2cCont, CTRL_REG1, RegVal);
+}
+
+/**
+ * Enable the new-slave state machine on the DVC's I2C block and program
+ * slave address 0xF. (Why 0xF specifically is not documented here --
+ * presumably a benign placeholder address; confirm against the TRM.)
+ */
+static void UseDvcI2cNewSlave(NvRmI2cControllerHandle hRmI2cCont)
+{
+    NvU32 RegVal = 0;
+    RegVal = NV_DRF_DEF(I2C, I2C_SL_CNFG, NEWSL, ENABLE);
+    I2C2_REGW(hRmI2cCont, I2C_SL_CNFG, RegVal);
+
+    RegVal = NV_DRF_NUM(I2C, I2C_SL_ADDR1, SL_ADDR0, 0xF);
+    I2C2_REGW(hRmI2cCont, I2C_SL_ADDR1, RegVal);
+}
+
+/**
+ * Build the three packet-mode header words for one i2c transaction and
+ * record the transaction's properties (read/write, no-ack, no-stop) on
+ * the controller for use by the ISR.
+ *
+ * Header1: protocol/size/packet-id/controller-id. Header2: payload size
+ * minus one. Header3: slave address plus the I2C-specific option bits.
+ */
+static void
+GetPacketHeaders(
+    NvRmI2cControllerHandle hRmI2cCont,
+    NvRmI2cTransactionInfo *pTransaction,
+    NvU32 PacketId,
+    NvU32 *pPacketHeader1,
+    NvU32 *pPacketHeader2,
+    NvU32 *pPacketHeader3)
+{
+    NvU32 PacketHeader1;
+    NvU32 PacketHeader2;
+    NvU32 PacketHeader3;
+
+    // Generic header1: one-word header, I2C protocol.
+    PacketHeader1 = NV_DRF_DEF(I2C, IO_PACKET_HEADER, HDRSZ, ONE) |
+                        NV_DRF_DEF(I2C, IO_PACKET_HEADER, PROTOCOL, I2C);
+
+    // Stamp the caller-supplied packet id.
+    PacketHeader1 = NV_FLD_SET_DRF_NUM(I2C, IO_PACKET_HEADER, PKTID, PacketId, PacketHeader1);
+
+    // Controller id is according to the instance of the i2c/dvc
+    PacketHeader1 = NV_FLD_SET_DRF_NUM(I2C, IO_PACKET_HEADER,
+                        CONTROLLER_ID, hRmI2cCont->ControllerId, PacketHeader1);
+
+    // Generic header2: payload size is encoded as (bytes - 1).
+    PacketHeader2 = NV_FLD_SET_DRF_NUM(I2C, IO_PACKET_HEADER,
+                        PAYLOADSIZE, (pTransaction->NumBytes - 1), 0);
+
+    // prepare IO specific header
+    // Configure the slave address
+    PacketHeader3 = pTransaction->Address;
+
+    // 10 bit address mode: Set address mode to 10 bit
+    if (hRmI2cCont->Is10BitAddress)
+        PacketHeader3 = NV_FLD_SET_DRF_DEF(I2C, IO_PACKET_HEADER, ADDR_MODE,
+                            TEN_BIT, PacketHeader3);
+
+    hRmI2cCont->IsCurrentTransferNoAck = NV_FALSE;
+    // Enable mode to handle devices that do not generate ACK
+    if (pTransaction->Flags & NVRM_I2C_NOACK)
+    {
+        PacketHeader3 = NV_FLD_SET_DRF_DEF(I2C, IO_PACKET_HEADER, CONTUNE_ON_NACK,
+                            ENABLE, PacketHeader3);
+        hRmI2cCont->IsCurrentTransferNoAck = NV_TRUE;
+    }
+
+    hRmI2cCont->IsCurrentTransferNoStop = NV_FALSE;
+
+    // Enable mode to repeat start if it is configured
+    if (pTransaction->Flags & NVRM_I2C_NOSTOP)
+    {
+        PacketHeader3 = NV_FLD_SET_DRF_DEF(I2C, IO_PACKET_HEADER,REPEAT_START,
+                            REPEAT_START, PacketHeader3);
+        hRmI2cCont->IsCurrentTransferNoStop = NV_TRUE;
+    }
+
+    hRmI2cCont->IsCurrentTransferRead = NV_FALSE;
+    // Read transfer unless the WRITE flag is set.
+    if (!(pTransaction->Flags & NVRM_I2C_WRITE))
+    {
+        PacketHeader3 = NV_FLD_SET_DRF_DEF(I2C, IO_PACKET_HEADER, READ, READ,
+                            PacketHeader3);
+        hRmI2cCont->IsCurrentTransferRead = NV_TRUE;
+    }
+
+    *pPacketHeader1 = PacketHeader1;
+    *pPacketHeader2 = PacketHeader2;
+    *pPacketHeader3 = PacketHeader3;
+}
+
+/**
+ * Switch the controller into packet mode: enable the new master state
+ * machine and kick the packet-mode transfer (PACKET_MODE_EN = GO).
+ */
+static void StartI2cPacketMode(NvRmI2cControllerHandle hRmI2cCont)
+{
+    NvU32 I2cConfig;
+    // PACKET_MODE_TRANSFER_EN field of I2C Controller configuration Register
+    I2cConfig = NV_DRF_DEF(I2C, I2C_CNFG, NEW_MASTER_FSM, ENABLE);
+    I2cConfig = NV_FLD_SET_DRF_DEF(I2C, I2C_CNFG, PACKET_MODE_EN, GO, I2cConfig);
+    I2C_REGW(hRmI2cCont, I2C_CNFG, I2cConfig);
+}
+
+/**
+ * Ensure the tx fifo is empty before queuing a new packet: if any slots
+ * are occupied, force a fifo flush. Returns the empty-slot count read
+ * BEFORE the flush through *pFifoEmptyCount.
+ */
+static void
+DoTxFifoEmpty(
+    NvRmI2cControllerHandle hRmI2cCont,
+    NvU32 *pFifoEmptyCount)
+{
+    NvU32 TFifoEmptyCount = 0;
+    NvU32 FifoStatus;
+
+    // Tx Fifo should be empty. If not force to make it empty
+    FifoStatus = I2C_REGR(hRmI2cCont, FIFO_STATUS);
+    TFifoEmptyCount = NV_DRF_VAL(I2C, FIFO_STATUS, TX_FIFO_EMPTY_CNT, FifoStatus);
+    if (TFifoEmptyCount < I2C_FIFO_DEPTH)
+        ResetTxFifo(hRmI2cCont);
+
+    *pFifoEmptyCount = TFifoEmptyCount;
+}
+
+/**
+ * Write the cached interrupt mask (hRmI2cCont->IntMaskReg) to the hw.
+ * No-op in OAL builds, which poll the status register instead of taking
+ * interrupts. NOTE(review): "Maks" is a typo for "Mask"; the name is
+ * kept because this file's other functions call it by this spelling.
+ */
+static void WriteIntMaksReg(NvRmI2cControllerHandle hRmI2cCont)
+{
+#if !NV_OAL
+    I2C_REGW (hRmI2cCont, INTERRUPT_MASK_REGISTER, hRmI2cCont->IntMaskReg);
+#endif
+}
+/**
+ * I2C interrupt service routine.
+ *
+ * Handles three situations:
+ *  - cpu-mode reads: drains the rx fifo into pDataBuffer, retargeting
+ *    the trigger level / mask as the remaining count shrinks;
+ *  - cpu-mode writes: refills the tx fifo from pDataBuffer, switching
+ *    to the all-packets-complete interrupt once everything is queued;
+ *  - final interrupt: clears the mask, marks the transfer complete and
+ *    signals the sync semaphore.
+ *
+ * All hw cleanup and every touch of hRmI2cCont (shared with the
+ * transaction-API thread) must finish before the semaphore is signaled.
+ * In OAL builds this is also invoked directly from the polling loop.
+ */
+static void I2cIsr(void* args)
+{
+    NvRmI2cControllerHandle hRmI2cCont = (NvRmI2cControllerHandle)args;
+    NvU32 FifoStatus;
+    NvU32 WordCount;
+    NvU32 FilledSlots;
+    NvU32 MaxWordToRead;
+    NvBool IsFinalIntGot = NV_FALSE;
+    NvU32 FreeSlots;
+    NvU32 MaxWordToWrite;
+    // Read the Interrupt status register & PKT_STATUS
+    hRmI2cCont->ControllerStatus = I2C_REGR(hRmI2cCont, INTERRUPT_STATUS_REGISTER);
+
+    // Write one to clear in the interrupt status register
+    I2C_REGW(hRmI2cCont, INTERRUPT_STATUS_REGISTER, hRmI2cCont->ControllerStatus);
+    FifoStatus = I2C_REGR(hRmI2cCont, FIFO_STATUS);
+
+    DEBUG_I2C_READ(1, ("ISR ContStatus 0x%08x FifoStatus 0x%08x\n",
+                        hRmI2cCont->ControllerStatus, FifoStatus));
+
+    // FinalInterrupt holds the mask bits that mark end-of-transfer for
+    // the current transaction (set up by the transaction path).
+    if (hRmI2cCont->ControllerStatus & hRmI2cCont->FinalInterrupt)
+        IsFinalIntGot = NV_TRUE;
+
+    if (hRmI2cCont->IsCurrentTransferRead)
+    {
+        // cpu-mode read: drain whatever the rx fifo currently holds.
+        if ((hRmI2cCont->WordRemaining) && (!hRmI2cCont->IsUsingApbDma))
+        {
+            DEBUG_I2C_READ(1, ("Reading RxFifo From Int\n"));
+
+            // Get RFifo full count
+            FilledSlots = NV_DRF_VAL(I2C, FIFO_STATUS, RX_FIFO_FULL_CNT, FifoStatus);
+
+            MaxWordToRead = NV_MIN(hRmI2cCont->WordRemaining, FilledSlots);
+            for (WordCount = 0; WordCount < MaxWordToRead; ++WordCount)
+            {
+                // Read data from the I2C RX pkt FIFO Register
+                hRmI2cCont->pDataBuffer[hRmI2cCont->WordTransferred] =
+                                        I2C_REGR(hRmI2cCont, I2C_RX_FIFO);
+                hRmI2cCont->WordTransferred++;
+            }
+            hRmI2cCont->WordRemaining -= MaxWordToRead;
+
+            if ((IsFinalIntGot) || (hRmI2cCont->WordRemaining == 0))
+                goto FinalIntDone;
+
+            // If still want to receive more than the fifo depth then continue
+            // the int
+            if (hRmI2cCont->WordRemaining > I2C_FIFO_DEPTH)
+                goto IntDone;
+
+            if(hRmI2cCont->IsCurrentTransferNoStop)
+            {
+                // No-stop transfer: lower the rx trigger to the exact
+                // remaining count and treat the next rx-data-request
+                // interrupt as the final one.
+                if (hRmI2cCont->WordRemaining < I2C_FIFO_DEPTH)
+                    SetRxFifoTriggerLevel(hRmI2cCont, hRmI2cCont->WordRemaining);
+
+                hRmI2cCont->FinalInterrupt = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                            RFIFO_DATA_REQ_INT_EN, ENABLE, hRmI2cCont->FinalInterrupt);
+            }
+            else
+            {
+                // Normal transfer: stop taking rx trigger interrupts and
+                // wait only for the all-packets-complete interrupt.
+                hRmI2cCont->IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                            RFIFO_DATA_REQ_INT_EN, DISABLE, hRmI2cCont->IntMaskReg);
+
+                hRmI2cCont->IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                            ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, hRmI2cCont->IntMaskReg);
+
+
+                hRmI2cCont->FinalInterrupt = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                            ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, hRmI2cCont->FinalInterrupt);
+                WriteIntMaksReg(hRmI2cCont);
+            }
+            goto IntDone;
+        }
+    }
+    else
+    {
+        if (IsFinalIntGot)
+            goto FinalIntDone;
+
+        // cpu-mode write: top up the tx fifo with the remaining words.
+        if (hRmI2cCont->WordRemaining)
+        {
+            DEBUG_I2C_SEND(1, ("Writing Tx from int\n"));
+
+            // Get TFifo empty count
+            FreeSlots = NV_DRF_VAL(I2C, FIFO_STATUS, TX_FIFO_EMPTY_CNT, FifoStatus);
+            MaxWordToWrite = NV_MIN(hRmI2cCont->WordRemaining, FreeSlots);
+            for (WordCount = 0; WordCount < MaxWordToWrite; ++WordCount)
+            {
+                // Write data into the I2C TX pkt FIFO Register
+                I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO,
+                        hRmI2cCont->pDataBuffer[hRmI2cCont->WordTransferred]);
+                hRmI2cCont->WordTransferred++;
+            }
+            hRmI2cCont->WordRemaining -= MaxWordToWrite;
+
+            if (hRmI2cCont->WordRemaining == 0)
+            {
+                if(hRmI2cCont->IsCurrentTransferNoStop)
+                {
+                    // No-stop: the next tx-data-request means all queued
+                    // data has been consumed -- treat it as final.
+                    hRmI2cCont->FinalInterrupt = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                                TFIFO_DATA_REQ_INT_EN, ENABLE, hRmI2cCont->FinalInterrupt);
+                }
+                else
+                {
+                    // Normal transfer: switch from tx trigger to the
+                    // all-packets-complete interrupt.
+                    hRmI2cCont->IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                                TFIFO_DATA_REQ_INT_EN, DISABLE, hRmI2cCont->IntMaskReg);
+
+                    hRmI2cCont->IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                                ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, hRmI2cCont->IntMaskReg);
+
+
+                    hRmI2cCont->FinalInterrupt = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+                                ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, hRmI2cCont->FinalInterrupt);
+                    WriteIntMaksReg(hRmI2cCont);
+                }
+            }
+        }
+        goto IntDone;
+    }
+FinalIntDone:
+    if(IsFinalIntGot)
+    {
+        // Clear interrupt mask register, and DVC interrupt status for DVC I2C.
+        // Note that h/w clean up and use of hRmI2cCont (shared with transaction
+        // API thread) must be completed in ISR before semaphore is signaled.
+        I2C_REGW (hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+        hRmI2cCont->IsTransferCompleted = NV_TRUE;
+        if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+            DVC_REGW(hRmI2cCont, STATUS_REG,
+                NV_DRF_NUM(DVC, STATUS_REG, I2C_DONE_INTR, 1));
+        NvOsSemaphoreSignal(hRmI2cCont->I2cSyncSemaphore);
+        goto Done;
+    }
+
+    // Falls through only on an interrupt we did not expect.
+    NvOsDebugPrintf("AP20 I2c Isr got unwanted interrupt IntStatus 0x%08x\n",
+                        hRmI2cCont->ControllerStatus);
+    NV_ASSERT(0);
+
+IntDone:
+    // DVC's done interrupt is level-ish: ack it even on intermediate ints.
+    if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+        DVC_REGW(hRmI2cCont, STATUS_REG, NV_DRF_NUM(DVC, STATUS_REG, I2C_DONE_INTR, 1));
+Done:
+    NvRmInterruptDone(hRmI2cCont->I2CInterruptHandle);
+}
+
+#if NV_OAL
+/**
+ * OAL-only completion wait: poll the interrupt status register in
+ * I2C_POLLING_TIMEOUT_STEP_USEC steps, dispatching I2cIsr() by hand
+ * whenever a masked status bit is set, until the transfer completes or
+ * `Timeout` (same units as the steps' budget) runs out.
+ *
+ * Returns NvSuccess on completion, NvError_Timeout on expiry.
+ */
+static NvError WaitForTransactionCompletesPolling(
+    NvRmI2cControllerHandle hRmI2cCont,
+    NvU32 Timeout)
+{
+    NvU32 RemainingTime = Timeout;
+    do {
+        // Read the Interrupt status register & PKT_STATUS
+        hRmI2cCont->ControllerStatus = I2C_REGR(hRmI2cCont, INTERRUPT_STATUS_REGISTER);
+        if (hRmI2cCont->ControllerStatus & hRmI2cCont->IntMaskReg)
+        {
+            // Run the ISR body inline; it sets IsTransferCompleted on the
+            // final interrupt.
+            I2cIsr(hRmI2cCont);
+            if (hRmI2cCont->IsTransferCompleted)
+                break;
+        }
+
+        NvOsWaitUS(I2C_POLLING_TIMEOUT_STEP_USEC);
+        RemainingTime = (RemainingTime > I2C_POLLING_TIMEOUT_STEP_USEC)?
+                            (RemainingTime - I2C_POLLING_TIMEOUT_STEP_USEC): 0;
+    } while(RemainingTime);
+
+    // Ack the DVC done interrupt when we exited via completion.
+    if (RemainingTime && (hRmI2cCont->ModuleId == NvRmModuleID_Dvc))
+        DVC_REGW(hRmI2cCont, STATUS_REG, NV_DRF_NUM(DVC, STATUS_REG, I2C_DONE_INTR, 1));
+
+    if (!RemainingTime)
+        return NvError_Timeout;
+
+    return NvSuccess;
+}
+#endif
+
+/**
+ * Block until the ISR signals transfer completion or `Timeout` expires.
+ * Non-OAL builds wait on the sync semaphore the ISR signals; OAL builds
+ * poll the status register instead.
+ *
+ * Returns NvSuccess on completion, the wait/poll error (e.g.
+ * NvError_Timeout) otherwise.
+ */
+static NvError WaitForTransactionCompletes(
+    NvRmI2cControllerHandle hRmI2cCont,
+    NvU32 Timeout)
+{
+    NvError Error;
+
+    hRmI2cCont->IsTransferCompleted = NV_FALSE;
+
+    // Fix: honor the caller-supplied Timeout. The parameter was previously
+    // ignored in favor of re-reading hRmI2cCont->timeout (which is what the
+    // in-file caller passes, so behavior is unchanged for existing callers).
+#if NV_OAL
+    Error = WaitForTransactionCompletesPolling(hRmI2cCont, Timeout);
+#else
+    // Wait for the transfer-complete signal from I2cIsr().
+    Error = NvOsSemaphoreWaitTimeout(hRmI2cCont->I2cSyncSemaphore, Timeout);
+#endif
+    return Error;
+}
+
+
+static NvError
+DoOneReceiveTransaction(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU32 PacketId,
+ NvU8* pBuffer,
+ NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+{
+ NvU32 WordsToRead = 0;
+ NvU32 TFifoEmptyCount = 0;
+ NvError Error = NvSuccess;
+ NvU32 PacketHeader1;
+ NvU32 PacketHeader2;
+ NvU32 PacketHeader3;
+ NvU32 IntMaskReg;
+ NvRmDmaModuleID DmaModuleId = NvRmDmaModuleID_I2c;
+
+ hRmI2cCont->WordTransferred = 0;
+ hRmI2cCont->WordRemaining = 0;
+ hRmI2cCont->TransCountFromLastDmaUsage++;
+
+ GetPacketHeaders(hRmI2cCont, pTransaction, PacketId, &PacketHeader1,
+ &PacketHeader2, &PacketHeader3);
+
+ DoTxFifoEmpty(hRmI2cCont, &TFifoEmptyCount);
+
+
+ IntMaskReg = 0;
+ if (!hRmI2cCont->IsCurrentTransferNoAck)
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ NOACK_INT_EN, ENABLE, IntMaskReg);
+
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+
+ // Words to read
+ WordsToRead = BYTES_TO_WORD(pTransaction->NumBytes);
+ hRmI2cCont->WordTransferred = 0;
+ hRmI2cCont->WordRemaining = WordsToRead;
+
+ // If requested size is more than cpu transaction thresold then use dma.
+ if ((hRmI2cCont->DmaBufferSize) &&
+ (hRmI2cCont->WordRemaining > I2C_MAX_WORD_TO_USE_CPU))
+ {
+ if (!hRmI2cCont->IsApbDmaAllocated)
+ {
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ DmaModuleId =NvRmDmaModuleID_Dvc;
+
+ Error = NvRmDmaAllocate(hRmI2cCont->hRmDevice, &hRmI2cCont->hRmDma,
+ NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+ hRmI2cCont->Instance);
+ if (!Error)
+ hRmI2cCont->IsApbDmaAllocated = NV_TRUE;
+ Error = NvSuccess;
+ }
+ if (!hRmI2cCont->IsApbDmaAllocated)
+ goto CpuBasedReading;
+
+ hRmI2cCont->IsUsingApbDma = NV_TRUE;
+ hRmI2cCont->TransCountFromLastDmaUsage = 0;
+ hRmI2cCont->RxDmaReq.TransferSize = hRmI2cCont->WordRemaining << 2;
+ SetRxFifoTriggerLevel(hRmI2cCont, 1);
+ if (hRmI2cCont->IsCurrentTransferNoStop)
+ {
+#if NV_OAL
+ goto CpuBasedReading;
+#else
+ Error = NvRmDmaStartDmaTransfer(hRmI2cCont->hRmDma, &hRmI2cCont->RxDmaReq,
+ NvRmDmaDirection_Forward, 0, hRmI2cCont->I2cSyncSemaphore);
+#endif
+ }
+ else
+ {
+ Error = NvRmDmaStartDmaTransfer(hRmI2cCont->hRmDma, &hRmI2cCont->RxDmaReq,
+ NvRmDmaDirection_Forward, 0, NULL);
+ }
+ if (!Error)
+ {
+ hRmI2cCont->ControllerStatus = 0;
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader1);
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader2);
+
+ // Write I2C specific header
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader3);
+
+ if (!hRmI2cCont->IsCurrentTransferNoStop)
+ {
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+ hRmI2cCont->IntMaskReg = IntMaskReg;
+ WriteIntMaksReg(hRmI2cCont);
+ goto WaitForCompletion;
+ }
+ Error = NvSuccess;
+// NvOsDebugPrintf("Read Using Dma\n");
+ }
+
+CpuBasedReading:
+ hRmI2cCont->IsUsingApbDma = NV_FALSE;
+
+ // Enable the Rx trigger level interrupt if the word to read is more than
+ // fifo depth or no stop transfer is selected
+ if ((hRmI2cCont->WordRemaining > I2C_FIFO_DEPTH) ||
+ hRmI2cCont->IsCurrentTransferNoStop)
+ {
+ SetRxFifoTriggerLevel(hRmI2cCont, hRmI2cCont->WordRemaining);
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ RFIFO_DATA_REQ_INT_EN, ENABLE, IntMaskReg);
+ }
+ else
+ {
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+
+ hRmI2cCont->IntMaskReg = IntMaskReg;
+
+ WriteIntMaksReg(hRmI2cCont);
+
+ //Write Generic Header1 & 2
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader1);
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader2);
+
+ // Write I2C specific header
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader3);
+
+WaitForCompletion:
+ Error = WaitForTransactionCompletes(hRmI2cCont, hRmI2cCont->timeout);
+ if (Error == NvSuccess)
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cReadFailed;
+ if (hRmI2cCont->ControllerStatus & I2C_FIFO_ERROR_INTERRUPTS)
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cReadFailed;
+ goto ReadExitWithReset;
+ }
+ else
+ {
+ if (!hRmI2cCont->IsCurrentTransferNoAck)
+ {
+ if(hRmI2cCont->ControllerStatus &
+ NV_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER, NOACK_INT_EN, ENABLE))
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cDeviceNotFound;
+ goto ReadExitWithReset;
+ }
+ }
+ if (pBytesTransferred != NULL)
+ *pBytesTransferred = pTransaction->NumBytes;
+
+ // Memcopy fifo back to actual buffer given by client
+ if (hRmI2cCont->IsUsingApbDma)
+ NvOsMemcpy(pBuffer, (NvU8* )hRmI2cCont->pDmaBuffer, *pBytesTransferred);
+ else
+ NvOsMemcpy(pBuffer, (NvU8* )hRmI2cCont->pDataBuffer, *pBytesTransferred);
+
+ hRmI2cCont->I2cTransferStatus = NvSuccess;
+ goto ReadExit;
+ }
+ }
+ else if (Error == NvError_Timeout)
+ {
+ DEBUG_I2C_READ(1, ("Read Timeout Error \n"));
+ hRmI2cCont->I2cTransferStatus = NvError_Timeout;
+ }
+
+ReadExitWithReset:
+ // If we reach here then there is something wrong in transfer, reset the module.
+ if (hRmI2cCont->IsUsingApbDma)
+ NvRmDmaAbort(hRmI2cCont->hRmDma);
+
+ // If there is NACK error, then there is possibilty that i2c controller is
+ // still busy to send the stop signal.
+ // Wait for 2x of i2c clock period is recommended, waiting for 1 ms to use
+ // the NvOsMsSleep api.
+ NvOsSleepMS(1);
+
+ NvRmModuleReset(hRmI2cCont->hRmDevice,
+ NVRM_MODULE_ID(hRmI2cCont->ModuleId, hRmI2cCont->Instance));
+ RESET_SEMA_COUNT(hRmI2cCont->I2cSyncSemaphore);
+ReadExit:
+ DEBUG_I2C_READ(1, ("Read Transfer Status 0x%08x \n", hRmI2cCont->I2cTransferStatus));
+
+ // Time to free dma??
+ if ((hRmI2cCont->IsApbDmaAllocated) &&
+ (hRmI2cCont->TransCountFromLastDmaUsage > HOLDING_DMA_TRANSACTION_COUNT))
+ {
+ NvRmDmaFree(hRmI2cCont->hRmDma);
+ hRmI2cCont->hRmDma = NULL;
+ hRmI2cCont->IsApbDmaAllocated = NV_FALSE;
+ }
+ return hRmI2cCont->I2cTransferStatus;
+}
+
+
+// Performs a single I2C send transaction (one packet, at most
+// MAX_I2C_ONE_TRANSACTION_SIZE bytes) in packet mode. Builds the three
+// packet-header words, then either streams header + payload through APB DMA
+// (large transfers, when a DMA channel can be obtained) or writes them to
+// the TX packet FIFO from the CPU, and finally waits for completion.
+//
+// hRmI2cCont        Controller handle holding transfer state.
+// PacketId          Packet identifier placed into the packet header.
+// pBuffer           Payload bytes to send.
+// pTransaction      Transfer description (address, NumBytes, flags).
+// pBytesTransferred Optional out parameter; set to NumBytes on success.
+//
+// Returns the status recorded in hRmI2cCont->I2cTransferStatus. On any
+// failure the controller module is reset before returning.
+static NvError
+DoOneSendTransaction(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU32 PacketId,
+ NvU8* pBuffer,
+ NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+{
+ NvU32 WordsToSend = 0;
+ NvU32 TFifoEmptyCount = 0;
+ NvError Error = NvSuccess;
+ NvU32 PacketHeader1;
+ NvU32 PacketHeader2;
+ NvU32 PacketHeader3;
+ NvU32 IntMaskReg;
+ NvU32 WordCount;
+ NvRmDmaModuleID DmaModuleId = NvRmDmaModuleID_I2c;
+
+ hRmI2cCont->WordTransferred = 0;
+ hRmI2cCont->WordRemaining = 0;
+ // Idle-transaction counter used at exit to decide when to release the DMA.
+ hRmI2cCont->TransCountFromLastDmaUsage++;
+
+ GetPacketHeaders(hRmI2cCont, pTransaction, PacketId, &PacketHeader1,
+ &PacketHeader2, &PacketHeader3);
+ // Drain the TX FIFO; TFifoEmptyCount receives the number of free words.
+ DoTxFifoEmpty(hRmI2cCont, &TFifoEmptyCount);
+
+ // Always watch for NACK unless the caller explicitly ignores ACKs.
+ IntMaskReg = 0;
+ if (!hRmI2cCont->IsCurrentTransferNoAck)
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ NOACK_INT_EN, ENABLE, IntMaskReg);
+
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+
+ // Words to write
+ WordsToSend = BYTES_TO_WORD(pTransaction->NumBytes);
+ hRmI2cCont->WordTransferred = 0;
+ hRmI2cCont->WordRemaining = WordsToSend;
+
+ // Use DMA only when a bounce buffer exists and the payload exceeds the
+ // CPU-transfer threshold; otherwise fall through to the CPU path.
+ if ((hRmI2cCont->DmaBufferSize) &&
+ (hRmI2cCont->WordRemaining > I2C_MAX_WORD_TO_USE_CPU))
+ {
+ // DMA channel is allocated lazily; an allocation failure is not an
+ // error — the transfer simply falls back to CPU-driven FIFO writes.
+ if (!hRmI2cCont->IsApbDmaAllocated)
+ {
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ DmaModuleId =NvRmDmaModuleID_Dvc;
+
+ Error = NvRmDmaAllocate(hRmI2cCont->hRmDevice, &hRmI2cCont->hRmDma,
+ NV_FALSE, NvRmDmaPriority_High, DmaModuleId,
+ hRmI2cCont->Instance);
+ if (!Error)
+ hRmI2cCont->IsApbDmaAllocated = NV_TRUE;
+ Error = NvSuccess;
+ }
+ if (!hRmI2cCont->IsApbDmaAllocated)
+ goto CpuBasedWriting;
+
+ hRmI2cCont->IsUsingApbDma = NV_TRUE;
+ hRmI2cCont->TransCountFromLastDmaUsage = 0;
+ // The three header words travel through the same DMA buffer as the
+ // payload, hence the "+ 3" words in the transfer size below.
+ hRmI2cCont->pDmaBuffer[0] = PacketHeader1;
+ hRmI2cCont->pDmaBuffer[1] = PacketHeader2;
+ hRmI2cCont->pDmaBuffer[2] = PacketHeader3;
+ hRmI2cCont->TxDmaReq.TransferSize = (hRmI2cCont->WordRemaining + 3) << 2;
+ NvOsMemcpy(hRmI2cCont->pDmaBuffer + 3, (void *)pBuffer, pTransaction->NumBytes);
+ SetTxFifoTriggerLevel(hRmI2cCont, 8);
+ Error = NvRmDmaStartDmaTransfer(hRmI2cCont->hRmDma, &hRmI2cCont->TxDmaReq,
+ NvRmDmaDirection_Forward, 0, NULL);
+ if (!Error)
+ {
+ // DMA now owns the payload: nothing left for the CPU to push.
+ hRmI2cCont->WordRemaining = 0;
+ if (hRmI2cCont->IsCurrentTransferNoStop)
+ {
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ TFIFO_DATA_REQ_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+ else
+ {
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+ goto WaitForCompletion;
+ }
+
+// NvOsDebugPrintf("Send Using Dma\n");
+ // DMA start failed: clear the error and retry via the CPU path.
+ Error = NvSuccess;
+ }
+
+CpuBasedWriting:
+ hRmI2cCont->IsUsingApbDma = NV_FALSE;
+
+ //Write Generic Header1 & 2
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader1);
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader2);
+
+ // Write I2C specific header
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO, PacketHeader3);
+
+ // Three FIFO slots were just consumed by the header words.
+ TFifoEmptyCount -= 3;
+
+ if (hRmI2cCont->WordRemaining)
+ {
+ // Stage the payload in the word-aligned bounce buffer, then push as
+ // many words as currently fit in the FIFO.
+ NvOsMemcpy(hRmI2cCont->pDataBuffer, (void *)pBuffer, pTransaction->NumBytes);
+
+ WordsToSend = NV_MIN(hRmI2cCont->WordRemaining, TFifoEmptyCount);
+ for (WordCount = 0; WordCount < WordsToSend; WordCount++)
+ {
+ // Write data into the I2C TX pkt FIFO Register
+ I2C_REGW(hRmI2cCont, I2C_TX_PACKET_FIFO,
+ hRmI2cCont->pDataBuffer[hRmI2cCont->WordTransferred]);
+ hRmI2cCont->WordTransferred++;
+ }
+ hRmI2cCont->WordRemaining -= WordsToSend;
+
+ if (hRmI2cCont->WordRemaining == 0)
+ {
+ // Everything queued: decide the completion interrupt based on
+ // whether a STOP will be generated.
+ if (hRmI2cCont->IsCurrentTransferNoStop)
+ {
+ SetTxFifoTriggerLevel(hRmI2cCont, 8);
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ TFIFO_DATA_REQ_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+ else
+ {
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ ALL_PACKETS_XFER_COMPLETE_INT_EN, ENABLE, IntMaskReg);
+ hRmI2cCont->FinalInterrupt = IntMaskReg;
+ }
+ }
+ else
+ {
+ // Payload did not fit: the ISR refills the FIFO on the
+ // data-request interrupt.
+ SetTxFifoTriggerLevel(hRmI2cCont, 8);
+ IntMaskReg = NV_FLD_SET_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER,
+ TFIFO_DATA_REQ_INT_EN, ENABLE, IntMaskReg);
+ }
+ }
+
+WaitForCompletion:
+ hRmI2cCont->IntMaskReg = IntMaskReg;
+ WriteIntMaksReg(hRmI2cCont);
+ Error = WaitForTransactionCompletes(hRmI2cCont, hRmI2cCont->timeout);
+ if (Error == NvSuccess)
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cWriteFailed;
+ if (hRmI2cCont->ControllerStatus & I2C_FIFO_ERROR_INTERRUPTS)
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cWriteFailed;
+ goto WriteExitWithReset;
+ }
+ else
+ {
+ // A NACK from the slave maps to "device not found".
+ if (!hRmI2cCont->IsCurrentTransferNoAck)
+ {
+ if(hRmI2cCont->ControllerStatus &
+ NV_DRF_DEF(I2C, INTERRUPT_MASK_REGISTER, NOACK_INT_EN, ENABLE))
+ {
+ hRmI2cCont->I2cTransferStatus = NvError_I2cDeviceNotFound;
+ goto WriteExitWithReset;
+ }
+ }
+ if (pBytesTransferred != NULL)
+ *pBytesTransferred = pTransaction->NumBytes;
+ hRmI2cCont->I2cTransferStatus = NvSuccess;
+ goto WriteExit;
+ }
+ }
+ else if (Error == NvError_Timeout)
+ {
+ DEBUG_I2C_SEND(1, ("SEND Timeout Error \n"));
+ hRmI2cCont->I2cTransferStatus = NvError_Timeout;
+ }
+
+WriteExitWithReset:
+ if (hRmI2cCont->IsUsingApbDma)
+ NvRmDmaAbort(hRmI2cCont->hRmDma);
+
+ // If there is NACK error, then there is possibilty that i2c controller is
+ // still busy to send the stop signal.
+ // Wait for 2x of i2c clock period is recommended, waiting for 1 ms to use
+ // the NvOsMsSleep api.
+ NvOsSleepMS(1);
+
+ // If we reach here then there is something wrong in transfer, reset the module.
+ NvRmModuleReset(hRmI2cCont->hRmDevice,
+ NVRM_MODULE_ID(hRmI2cCont->ModuleId, hRmI2cCont->Instance));
+ RESET_SEMA_COUNT(hRmI2cCont->I2cSyncSemaphore);
+WriteExit:
+ DEBUG_I2C_SEND(1, ("Send Transfer Status 0x%08x \n", hRmI2cCont->I2cTransferStatus));
+
+ // Release the DMA channel if it has gone unused for several transactions.
+ if ((hRmI2cCont->IsApbDmaAllocated) &&
+ (hRmI2cCont->TransCountFromLastDmaUsage > HOLDING_DMA_TRANSACTION_COUNT))
+ {
+ NvRmDmaFree(hRmI2cCont->hRmDma);
+ hRmI2cCont->hRmDma = NULL;
+ hRmI2cCont->IsApbDmaAllocated = NV_FALSE;
+ }
+
+ return hRmI2cCont->I2cTransferStatus;
+}
+
+
+// Splits one receive request into chunks of at most
+// MAX_I2C_ONE_TRANSACTION_SIZE bytes and runs them as consecutive packets;
+// every chunk except the last is sent with NVRM_I2C_NOSTOP so no STOP
+// condition is generated mid-transfer.
+//
+// pPacketId          In/out: packet id of the first chunk; advanced past the
+//                    last chunk issued.
+// pBytesTransferred  Out: total bytes received across all completed chunks
+//                    (partial count if an error aborts the loop).
+static NvError
+DoMultiReceiveTransaction(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU32 *pPacketId,
+ NvU8* pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+{
+ NvRmI2cTransactionInfo Transaction;
+ NvU32 BytesTransferredYet = 0;
+ NvU32 TotalBytesRequested;
+ NvU8 *pReadBuffer = pBuffer;
+ NvError Error = NvSuccess;
+ NvU32 CurrBytesRequested;
+ NvU32 PacketId;
+ NvU32 BytesTransferred = 0;
+
+ // Zero the chunk descriptor so no field reaches the one-shot transfer
+ // uninitialized.
+ NvOsMemset(&Transaction, 0, sizeof(Transaction));
+ Transaction.Is10BitAddress = pTransaction->Is10BitAddress;
+ Transaction.Address = pTransaction->Address;
+ TotalBytesRequested = pTransaction->NumBytes;
+ PacketId = *pPacketId;
+ while (TotalBytesRequested)
+ {
+ Transaction.Flags = pTransaction->Flags;
+ CurrBytesRequested = TotalBytesRequested;
+ if (TotalBytesRequested > MAX_I2C_ONE_TRANSACTION_SIZE)
+ {
+ // Not the final chunk: suppress the STOP condition.
+ Transaction.Flags |= NVRM_I2C_NOSTOP;
+ CurrBytesRequested = MAX_I2C_ONE_TRANSACTION_SIZE;
+ }
+ Transaction.NumBytes = CurrBytesRequested;
+ Error = DoOneReceiveTransaction(hRmI2cCont, PacketId, pReadBuffer,
+ &Transaction, &BytesTransferred);
+ if (Error)
+ break;
+ BytesTransferredYet += CurrBytesRequested;
+ pReadBuffer += CurrBytesRequested;
+ TotalBytesRequested -= CurrBytesRequested;
+ PacketId++;
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+ }
+ *pPacketId = PacketId;
+ // BUGFIX: report the accumulated count; the previous code returned only
+ // the size of the last chunk (BytesTransferred), under-reporting any
+ // transfer larger than MAX_I2C_ONE_TRANSACTION_SIZE.
+ *pBytesTransferred = BytesTransferredYet;
+ (void)BytesTransferred;
+ return Error;
+}
+
+// Splits one send request into chunks of at most
+// MAX_I2C_ONE_TRANSACTION_SIZE bytes and runs them as consecutive packets;
+// every chunk except the last is sent with NVRM_I2C_NOSTOP so no STOP
+// condition is generated mid-transfer.
+//
+// pPacketId          In/out: packet id of the first chunk; advanced past the
+//                    last chunk issued.
+// pBytesTransferred  Out: total bytes sent across all completed chunks
+//                    (partial count if an error aborts the loop).
+static NvError
+DoMultiSendTransaction(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU32 *pPacketId,
+ NvU8* pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+
+{
+ NvRmI2cTransactionInfo Transaction;
+ NvU32 BytesTransferredYet = 0;
+ NvU32 TotalBytesRequested;
+ NvU8 *pSrcBuffer = pBuffer;
+ NvError Error = NvSuccess;
+ NvU32 CurrBytesRequested;
+ NvU32 PacketId;
+ NvU32 BytesTransferred = 0;
+
+ // Zero the chunk descriptor so no field reaches the one-shot transfer
+ // uninitialized.
+ NvOsMemset(&Transaction, 0, sizeof(Transaction));
+ Transaction.Is10BitAddress = pTransaction->Is10BitAddress;
+ Transaction.Address = pTransaction->Address;
+ TotalBytesRequested = pTransaction->NumBytes;
+ PacketId = *pPacketId;
+ while (TotalBytesRequested)
+ {
+ Transaction.Flags = pTransaction->Flags;
+ CurrBytesRequested = TotalBytesRequested;
+ if (TotalBytesRequested > MAX_I2C_ONE_TRANSACTION_SIZE)
+ {
+ // Not the final chunk: suppress the STOP condition.
+ Transaction.Flags |= NVRM_I2C_NOSTOP;
+ CurrBytesRequested = MAX_I2C_ONE_TRANSACTION_SIZE;
+ }
+ Transaction.NumBytes = CurrBytesRequested;
+ Error = DoOneSendTransaction(hRmI2cCont, PacketId, pSrcBuffer,
+ &Transaction, &BytesTransferred);
+ if (Error)
+ break;
+ BytesTransferredYet += CurrBytesRequested;
+ pSrcBuffer += CurrBytesRequested;
+ TotalBytesRequested -= CurrBytesRequested;
+ PacketId++;
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+ }
+ *pPacketId = PacketId;
+ // BUGFIX: report the accumulated count; the previous code returned only
+ // the size of the last chunk (BytesTransferred), under-reporting any
+ // transfer larger than MAX_I2C_ONE_TRANSACTION_SIZE.
+ *pBytesTransferred = BytesTransferredYet;
+ (void)BytesTransferred;
+ return Error;
+}
+
+
+// Entry point for a receive transfer: prepares the controller (DVC control
+// init when needed), clears the interrupt mask, enables packet mode, and
+// delegates the actual transfer to DoMultiReceiveTransaction.
+static NvError
+AP20RmI2cReceive(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU8* pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+{
+ NvError Error = NvSuccess;
+ NvU32 PacketId = 1;
+
+ NV_ASSERT(pBuffer);
+ NV_ASSERT(pTransaction->NumBytes > 0);
+
+ // BUGFIX: the trace previously referenced a non-existent local
+ // 'Transaction', breaking the build whenever tracing was enabled.
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cReceive()++ 0x%08x and add 0x%02x\n", pTransaction->NumBytes, pTransaction->Address));
+
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ DoDvcI2cControlInitialization(hRmI2cCont);
+
+ // Clear interrupt mask register to avoid any false interrupts.
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+
+ // Start the packet mode
+ StartI2cPacketMode(hRmI2cCont);
+
+ Error = DoMultiReceiveTransaction(hRmI2cCont, &PacketId, pBuffer,
+ pTransaction, pBytesTransferred);
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cReceive()-- 0x%08x\n", Error));
+ return Error;
+}
+
+
+// Entry point for a send transfer: prepares the controller (DVC control
+// init when needed), clears the interrupt mask, enables packet mode, and
+// delegates the actual transfer to DoMultiSendTransaction.
+static NvError
+AP20RmI2cSend(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU8* pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction,
+ NvU32* pBytesTransferred)
+{
+ NvError Error = NvSuccess;
+ NvU32 PacketId = 1;
+
+ NV_ASSERT(pBuffer);
+ NV_ASSERT(pTransaction->NumBytes > 0);
+
+ // BUGFIX: the trace previously referenced a non-existent local
+ // 'Transaction', breaking the build whenever tracing was enabled.
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cSend()++ 0x%08x and 0x%02x\n", pTransaction->NumBytes, pTransaction->Address));
+
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ DoDvcI2cControlInitialization(hRmI2cCont);
+
+ // Clear interrupt mask register to avoid any false interrupts.
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+
+ // Start the packet mode
+ StartI2cPacketMode(hRmI2cCont);
+
+ Error = DoMultiSendTransaction(hRmI2cCont, &PacketId, pBuffer,
+ pTransaction, pBytesTransferred);
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cSend()-- 0x%08x\n", Error));
+ return Error;
+}
+
+// Runs a list of transactions back-to-back with repeat-start semantics:
+// each entry in pTransactions consumes pTransactions[i].NumBytes from
+// pBuffer, writes or reads according to its NVRM_I2C_WRITE flag, and the
+// loop stops at the first error.
+//
+// NoOfTransations  Number of entries in pTransactions.
+// Returns the status of the last transaction attempted.
+static NvError
+AP20RmI2cRepeatStartTransaction(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU8* pBuffer,
+ NvRmI2cTransactionInfo *pTransactions,
+ NvU32 NoOfTransations)
+{
+ NvError Error = NvSuccess;
+ NvU8 *pReqBuffer = pBuffer;
+ NvU32 BytesSend = 0; // initialized: value is ignored but must not be indeterminate
+ NvU32 BytesRecvd = 0;
+ NvU32 PacketId;
+ NvU32 TransCount;
+
+ NV_ASSERT(pBuffer);
+ NV_ASSERT(pTransactions);
+
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cRepeatStartTransaction()++ 0x%08x and 0x%02x\n", NoOfTransations, pTransactions[0].Address));
+
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ {
+ DoDvcI2cControlInitialization(hRmI2cCont);
+ UseDvcI2cNewSlave(hRmI2cCont);
+ }
+
+ // Clear interrupt mask register to avoid any false interrupts.
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+
+ // Start the packet mode
+ StartI2cPacketMode(hRmI2cCont);
+
+ PacketId = 1;
+ for (TransCount = 0; TransCount < NoOfTransations; TransCount++)
+ {
+ if (pTransactions[TransCount].Flags & NVRM_I2C_WRITE)
+ {
+ Error = DoMultiSendTransaction(hRmI2cCont, &PacketId,
+ pReqBuffer, &pTransactions[TransCount], &BytesSend);
+ }
+ else
+ {
+ Error = DoMultiReceiveTransaction(hRmI2cCont, &PacketId,
+ pReqBuffer, &pTransactions[TransCount], &BytesRecvd);
+ }
+ if (Error)
+ {
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cRepeatStartTransaction()-- 0x%08x at Transaction %d \n", Error, TransCount));
+ break;
+ }
+ // Advance into the shared buffer by this transaction's byte count.
+ pReqBuffer += pTransactions[TransCount].NumBytes;
+
+ I2C_REGW(hRmI2cCont, INTERRUPT_MASK_REGISTER, 0);
+ PacketId++;
+ }
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cRepeatStartTransaction()-- 0x%08x at Transaction %d \n", Error, TransCount));
+ return Error;
+}
+
+// Maps a controller instance + pin-map selection to the GPIO port/pin pair
+// used for the software (bit-banged) I2C fallback.  The values stored into
+// *pScl and *pSda are encoded as (pin | (port << 16)), port 0 being 'a'.
+// Returns NV_TRUE when the combination has GPIO-capable pins; NV_FALSE
+// otherwise (the outputs then carry the zero encoding).
+static NvBool AP20RmI2cGetGpioPins(
+ NvRmI2cControllerHandle hRmI2cCont,
+ NvU32 I2cPinMap,
+ NvU32 *pScl,
+ NvU32 *pSda)
+{
+ NvU32 ClockPin = 0;
+ NvU32 DataPin = 0;
+ NvU32 ClockPort = 0;
+ NvU32 DataPort = 0;
+ NvBool Found = NV_TRUE;
+
+ NV_ASSERT((pScl!=NULL) && (pSda!=NULL));
+
+ if (hRmI2cCont->ModuleId == NvRmModuleID_I2c)
+ {
+ // Dispatch on (instance, pin map) packed into a single key:
+ // instance in the high nibble, pin map in the low nibble.
+ switch ((hRmI2cCont->Instance<<4) | I2cPinMap)
+ {
+ case ((0<<4) | 1):
+ ClockPort = 'c' - 'a';
+ DataPort = 'c' - 'a';
+ ClockPin = 4;
+ DataPin = 5;
+ break;
+ case ((0<<4) | 2):
+ ClockPort = 'k' - 'a';
+ DataPort = 'k' - 'a';
+ ClockPin = 5;
+ DataPin = 6;
+ break;
+ case ((0<<4) | 3):
+ ClockPort = 'w' - 'a';
+ DataPort = 'w' - 'a';
+ ClockPin = 2;
+ DataPin = 3;
+ break;
+ /* NOTE: The pins used by pin map 1 for instance 1 are not
+ * connected to a GPIO controller (DDC pins), so the software
+ * fallback is not supported for them. */
+ case ((1<<4) | 2):
+ ClockPort = 't' - 'a';
+ DataPort = 't' - 'a';
+ ClockPin = 5;
+ DataPin = 6;
+ break;
+ case ((2<<4) | 1):
+ // Port 'BB'
+ ClockPort = 'z' - 'a' + 2;
+ DataPort = 'z' - 'a' + 2;
+ ClockPin = 2;
+ DataPin = 3;
+ break;
+ default:
+ Found = NV_FALSE;
+ }
+ }
+ else if ((hRmI2cCont->ModuleId == NvRmModuleID_Dvc) &&
+ (hRmI2cCont->Instance == 0) &&
+ (I2cPinMap == NvOdmI2cPmuPinMap_Config1))
+ {
+ // DVC (PMU) controller: fixed mapping on port 'z'.
+ ClockPin = 6;
+ DataPin = 7;
+ ClockPort = 'z' - 'a';
+ DataPort = 'z' - 'a';
+ }
+ else
+ {
+ Found = NV_FALSE;
+ }
+
+ *pScl = (ClockPin | (ClockPort<<16));
+ *pSda = (DataPin | (DataPort<<16));
+ return Found;
+}
+
+// Tears down everything AP20RmI2cOpen set up: interrupt registration and
+// sync semaphore, the CPU staging buffer, the DMA channel (aborted first if
+// active), the DMA bounce buffer, and finally the function-pointer table so
+// the handle cannot dispatch into freed state.  Safe to call on a partially
+// initialized handle (Open calls it on its own error path).
+static void AP20RmI2cClose(NvRmI2cControllerHandle hRmI2cCont)
+{
+
+ // The semaphore and the interrupt registration are created together in
+ // Open, so the semaphore doubles as the "interrupt registered" flag.
+ if (hRmI2cCont->I2cSyncSemaphore)
+ {
+ NvRmInterruptUnregister(hRmI2cCont->hRmDevice, hRmI2cCont->I2CInterruptHandle);
+ NvOsSemaphoreDestroy(hRmI2cCont->I2cSyncSemaphore);
+ hRmI2cCont->I2cSyncSemaphore = NULL;
+ hRmI2cCont->I2CInterruptHandle = NULL;
+ }
+
+ // pDataBuffer aliases pCpuBuffer; only the latter owns the allocation.
+ if (hRmI2cCont->pCpuBuffer)
+ {
+ NvOsFree(hRmI2cCont->pCpuBuffer);
+ hRmI2cCont->pCpuBuffer = NULL;
+ hRmI2cCont->pDataBuffer = NULL;
+ }
+
+ // Abort any in-flight DMA before releasing the channel.
+ if (hRmI2cCont->hRmDma)
+ {
+ NvRmDmaAbort(hRmI2cCont->hRmDma);
+ NvRmDmaFree(hRmI2cCont->hRmDma);
+ }
+
+ DestroyDmaTransferBuffer(hRmI2cCont->hRmMemory, hRmI2cCont->pDmaBuffer,
+ hRmI2cCont->DmaBufferSize);
+
+ hRmI2cCont->hRmDma = NULL;
+ hRmI2cCont->hRmMemory = NULL;
+ hRmI2cCont->DmaBuffPhysAdd = 0;
+ hRmI2cCont->pDmaBuffer = NULL;
+
+ // Clear the dispatch table so stale calls fail fast.
+ hRmI2cCont->receive = 0;
+ hRmI2cCont->send = 0;
+ hRmI2cCont->repeatStart = 0;
+ hRmI2cCont->close = 0;
+ hRmI2cCont->GetGpioPins = 0;
+}
+
+// Initializes an AP20 I2C controller handle: installs the AP20 operation
+// table, allocates the DMA bounce buffer (optional — failure falls back to
+// CPU-only transfers) and the CPU staging buffer, creates the completion
+// semaphore, and registers the interrupt handler.  On any fatal error all
+// partial allocations are undone via AP20RmI2cClose.
+NvError AP20RmI2cOpen(NvRmI2cControllerHandle hRmI2cCont)
+{
+ NvError Error = NvSuccess;
+ NvU32 IrqList;
+ NvOsInterruptHandler IntHandlers = I2cIsr;
+ NvU32 RxFifoPhyAddress;
+ NvU32 TxFifoPhyAddress;
+
+ NV_ASSERT(hRmI2cCont);
+ DEBUG_I2C_TRACE(1, ("AP20RmI2cOpen\n"));
+
+ // Populate the per-chip operation table and register layout.
+ hRmI2cCont->receive = AP20RmI2cReceive;
+ hRmI2cCont->send = AP20RmI2cSend;
+ hRmI2cCont->repeatStart = AP20RmI2cRepeatStartTransaction;
+ hRmI2cCont->close = AP20RmI2cClose;
+ hRmI2cCont->GetGpioPins = AP20RmI2cGetGpioPins;
+ hRmI2cCont->I2cRegisterOffset = I2C_I2C_CNFG_0;
+ hRmI2cCont->ControllerId = hRmI2cCont->Instance;
+
+ hRmI2cCont->hRmDma = NULL;
+ hRmI2cCont->hRmMemory = NULL;
+ hRmI2cCont->DmaBuffPhysAdd = 0;
+ hRmI2cCont->pDmaBuffer = NULL;
+
+ hRmI2cCont->pCpuBuffer = NULL;
+ hRmI2cCont->pDataBuffer = NULL;
+ hRmI2cCont->I2cSyncSemaphore = NULL;
+ hRmI2cCont->I2CInterruptHandle = NULL;
+ hRmI2cCont->TransCountFromLastDmaUsage = 0;
+
+ TxFifoPhyAddress = hRmI2cCont->I2cRegisterOffset + I2C_I2C_TX_PACKET_FIFO_0;
+ RxFifoPhyAddress = hRmI2cCont->I2cRegisterOffset + I2C_I2C_RX_FIFO_0;
+
+ // The DVC controller has a different register layout and fixed id 3.
+ if (hRmI2cCont->ModuleId == NvRmModuleID_Dvc)
+ {
+ hRmI2cCont->I2cRegisterOffset = 0;
+ hRmI2cCont->ControllerId = 3;
+ RxFifoPhyAddress = DVC_I2C_RX_FIFO_0;
+ TxFifoPhyAddress = DVC_I2C_TX_PACKET_FIFO_0;
+ }
+
+ hRmI2cCont->IsApbDmaAllocated = NV_FALSE;
+ hRmI2cCont->hRmDma = NULL;
+
+ // Allocate the DMA bounce buffer. Failure is non-fatal: DmaBufferSize
+ // stays 0, which disables the DMA path in the transfer routines.
+ hRmI2cCont->DmaBufferSize = 0;
+ Error = CreateDmaTransferBuffer(hRmI2cCont->hRmDevice, &hRmI2cCont->hRmMemory,
+ &hRmI2cCont->DmaBuffPhysAdd, (void **)&hRmI2cCont->pDmaBuffer,
+ DEFAULT_I2C_DMA_BUFFER_SIZE);
+ if (Error)
+ {
+ hRmI2cCont->hRmMemory = NULL;
+ hRmI2cCont->DmaBuffPhysAdd = 0;
+ hRmI2cCont->pDmaBuffer = NULL;
+ Error = NvSuccess;
+ }
+ else
+ {
+ hRmI2cCont->DmaBufferSize = DEFAULT_I2C_DMA_BUFFER_SIZE;
+
+ // RX: FIFO register (wrapped) -> memory; TX: memory -> FIFO (wrapped).
+ hRmI2cCont->RxDmaReq.SourceBufferPhyAddress= RxFifoPhyAddress;
+ hRmI2cCont->RxDmaReq.DestinationBufferPhyAddress = hRmI2cCont->DmaBuffPhysAdd;
+ hRmI2cCont->RxDmaReq.SourceAddressWrapSize = 4;
+ hRmI2cCont->RxDmaReq.DestinationAddressWrapSize = 0;
+
+ hRmI2cCont->TxDmaReq.SourceBufferPhyAddress= hRmI2cCont->DmaBuffPhysAdd;
+ hRmI2cCont->TxDmaReq.DestinationBufferPhyAddress = TxFifoPhyAddress;
+ hRmI2cCont->TxDmaReq.SourceAddressWrapSize = 0;
+ hRmI2cCont->TxDmaReq.DestinationAddressWrapSize = 4;
+ }
+
+ // CPU staging buffer is mandatory (used by the CPU transfer path).
+ if (!Error)
+ {
+ hRmI2cCont->pCpuBuffer = NvOsAlloc(DEFAULT_I2C_CPU_BUFFER_SIZE);
+ if (!hRmI2cCont->pCpuBuffer)
+ Error = NvError_InsufficientMemory;
+ }
+
+ if (!Error)
+ hRmI2cCont->pDataBuffer = hRmI2cCont->pCpuBuffer;
+
+ // Create the sync semaphore for the interrupt synchronisation
+ if (!Error)
+ Error = NvOsSemaphoreCreate( &hRmI2cCont->I2cSyncSemaphore, 0);
+
+ if (!Error)
+ {
+ IrqList = NvRmGetIrqForLogicalInterrupt(
+ hRmI2cCont->hRmDevice, NVRM_MODULE_ID(hRmI2cCont->ModuleId, hRmI2cCont->Instance), 0);
+
+ Error = NvRmInterruptRegister(hRmI2cCont->hRmDevice, 1, &IrqList, &IntHandlers,
+ hRmI2cCont, &hRmI2cCont->I2CInterruptHandle, NV_TRUE);
+ }
+
+ // Packet mode initialization
+ hRmI2cCont->RsTransfer = NV_FALSE;
+
+ // If error then destroy all the allocation done here.
+ if (Error)
+ AP20RmI2cClose(hRmI2cCont);
+
+ return Error;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_owr.c b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_owr.c
new file mode 100644
index 000000000000..fe2d64c8953e
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_owr.c
@@ -0,0 +1,853 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: OWR API</b>
+ *
+ * @b Description: Contains the NvRM OWR implementation for AP20.
+ */
+
+#include "nvrm_owr.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "ap20/arowr.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "nvrm_owr_private.h"
+#include "nvodm_query.h"
+#include "nvrm_module.h"
+
+// Enable the following flag for debug messages
+#define ENABLE_OWR_DEBUG 0
+
+#if ENABLE_OWR_DEBUG
+#define OWR_PRINT(X) NvOsDebugPrintf X
+#else
+#define OWR_PRINT(X)
+#endif
+
+// Enable the following flag to use polling (instead of interrupts) in bit transfer mode
+#define OWR_BIT_TRANSFER_POLLING_MODE 0
+
+/* Timeout for transferring a bit in micro seconds */
+#define BIT_TRASNFER_DONE_TIMEOUT_USEC 1000
+
+/* Polling timeout steps for transferring a bit in micro seconds */
+#define BIT_TRASNFER_DONE_STEP_TIMEOUT_USEC 10
+
+/* Semaphore timeout for bit/byte transfers */
+#define OWR_TRANSFER_TIMEOUT_MILLI_SEC 5000
+
+/* OWR controller errors in byte transfer mode */
+#define OWR_BYTE_TRANSFER_ERRORS 0x70F
+
+/* OWR controller errors in bit transfer mode */
+#define OWR_BIT_TRANSFER_ERRORS 0x1
+
+/* OWR CRC size in bytes */
+#define OWR_CRC_SIZE_BYTES 1
+
+/* OWR ROM command size */
+#define OWR_ROM_CMD_SIZE_BYTES 1
+
+/* OWR MEM command size */
+#define OWR_MEM_CMD_SIZE_BYTES 1
+
+/* OWR fifo depth */
+#define OWR_FIFO_DEPTH 32
+/* OWR fifo word size */
+#define OWR_FIFO_WORD_SIZE 4
+
+
+/* default read data clock value */
+#define OWR_DEFAULT_READ_DTA_CLK_VALUE 0x7
+/* default read presence clock value */
+#define OWR_DEFAULT_PRESENCE_CLK_VALUE 0x50
+/* Default OWR device memory offset size */
+#define OWR_DEFAULT_OFFSET_SIZE_BYTES 2
+
+
+/* Register access Macros */
+#define OWR_REGR(OwrVirtualAddress, reg) \
+ NV_READ32((OwrVirtualAddress) + ((OWR_##reg##_0)/4))
+
+#define OWR_REGW(OwrVirtualAddress, reg, val) \
+ do\
+ {\
+ NV_WRITE32((((OwrVirtualAddress) + ((OWR_##reg##_0)/4))), (val));\
+ }while (0)
+
+void PrivOwrEnableInterrupts(NvRmOwrController *pOwrInfo, NvU32 OwrIntStatus);
+NvError PrivOwrSendCommand(NvRmOwrController *pOwrInfo, NvU32 Command);
+
+NvError
+PrivOwrCheckBitTransferDone(
+ NvRmOwrController* pOwrInfo,
+ OwrIntrStatus status);
+
+NvError
+PrivOwrReadData(
+ NvRmOwrController *pOwrInfo,
+ NvU8* Buffer,
+ NvU32 NoOfBytes);
+
+static NvError
+PrivOwrReadFifo(
+ NvRmOwrController* pOwrInfo,
+ NvU8* pBuffer,
+ NvRmOwrTransactionInfo Transaction,
+ const NvOdmQueryOwrDeviceInfo* pOdmInfo,
+ NvU32 NumBytes);
+
+
+// Programs the OWR interrupt mask: each 1 bit in OwrIntStatus enables the
+// corresponding interrupt source.
+void PrivOwrEnableInterrupts(NvRmOwrController *pOwrInfo, NvU32 OwrIntStatus)
+{
+ // Write the enable bits to the interrupt mask register (INTR_MASK).
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, INTR_MASK, OwrIntStatus);
+}
+
+// Waits for a single-bit OWR transfer to finish.  Two compile-time
+// variants: with OWR_BIT_TRANSFER_POLLING_MODE set, the interrupt status
+// register is polled for the bit passed in 'status'; otherwise the routine
+// blocks on the ISR semaphore and inspects the status captured by the ISR.
+// NOTE(review): in the semaphore variant the 'status' parameter's incoming
+// value is ignored — the variable is reused as the NvError return.
+// Returns NvSuccess, NvError_Timeout, or NvError_OwrBitTransferFailed (the
+// controller is reset on failure or spurious wakeup).
+NvError
+PrivOwrCheckBitTransferDone(
+ NvRmOwrController* pOwrInfo,
+ OwrIntrStatus status)
+{
+
+#if OWR_BIT_TRANSFER_POLLING_MODE
+
+ NvU32 timeout = 0;
+ NvU32 val = 0;
+
+ // Poll the interrupt status register until the requested bit is set or
+ // the overall timeout elapses.
+ while(timeout < BIT_TRASNFER_DONE_TIMEOUT_USEC)
+ {
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, INTR_STATUS);
+ if (val & status)
+ {
+ // clear the bit transfer done status
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, INTR_STATUS, val);
+ break;
+ }
+ NvOsWaitUS(BIT_TRASNFER_DONE_STEP_TIMEOUT_USEC);
+ timeout += BIT_TRASNFER_DONE_STEP_TIMEOUT_USEC;
+ }
+
+ if (timeout >= BIT_TRASNFER_DONE_TIMEOUT_USEC)
+ {
+ return NvError_Timeout;
+ }
+
+ return NvSuccess;
+#else
+ // wait for the read to complete
+ status = NvOsSemaphoreWaitTimeout(pOwrInfo->OwrSyncSemaphore,
+ OWR_TRANSFER_TIMEOUT_MILLI_SEC);
+ if (status == NvSuccess)
+ {
+ // OwrTransferStatus was recorded by the ISR before signalling.
+ if (pOwrInfo->OwrTransferStatus & OWR_BIT_TRANSFER_ERRORS)
+ {
+ status = NvError_OwrBitTransferFailed;
+ NvRmModuleReset(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+ OWR_PRINT(("RM_OWR Bit mode error[0x%x]\n",
+ pOwrInfo->OwrTransferStatus));
+ }
+ else if(!pOwrInfo->OwrTransferStatus)
+ {
+ // Semaphore signalled but no status bits: spurious interrupt.
+ status = NvError_OwrBitTransferFailed;
+ NvRmModuleReset(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+ OWR_PRINT(("RM_OWR bit mode spurious interrupt [0x%x]\n",
+ pOwrInfo->OwrTransferStatus));
+ NV_ASSERT(!"RM_OWR spurious interrupt in Bit transfer mode\n");
+ }
+ }
+ // Consume the status so the next wait starts clean.
+ pOwrInfo->OwrTransferStatus = 0;
+ return status;
+#endif
+}
+
+// Shifts an 8-bit command onto the 1-wire bus in bit-transfer mode, least
+// significant bit first.  Each bit is issued with its own CONTROL-register
+// write (WR1_BIT/WR0_BIT) and the routine waits for the bit-transfer-done
+// indication before sending the next bit.
+// Returns NvSuccess, or the failure/timeout reported for any single bit.
+NvError PrivOwrSendCommand(NvRmOwrController *pOwrInfo, NvU32 Command)
+{
+ NvU32 BaseControl;
+ NvU32 RegValue;
+ NvError Err = NvError_Timeout;
+ NvU32 BitIndex = 0;
+
+ // Common control-word template: sample clocks plus bit-transfer mode.
+ BaseControl =
+ (NV_DRF_NUM(OWR, CONTROL, RD_DATA_SAMPLE_CLK, 0x7) |
+ NV_DRF_NUM(OWR, CONTROL, PRESENCE_SAMPLE_CLK, 0x50) |
+ NV_DRF_DEF(OWR, CONTROL, DATA_TRANSFER_MODE, BIT_TRANSFER_MODE));
+
+ for (BitIndex = 0; BitIndex < OWR_NO_OF_BITS_PER_BYTE; BitIndex++)
+ {
+ // Pick the write-one or write-zero time slot for the current bit.
+ if ((Command >> BitIndex) & 0x1)
+ {
+ RegValue =
+ BaseControl | (NV_DRF_DEF(OWR, CONTROL, WR1_BIT, TRANSFER_ONE));
+ }
+ else
+ {
+ RegValue =
+ BaseControl | (NV_DRF_DEF(OWR, CONTROL, WR0_BIT, TRANSFER_ZERO));
+ }
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, CONTROL, RegValue);
+
+ // Block until this bit has gone out (or fail the whole command).
+ Err = PrivOwrCheckBitTransferDone(pOwrInfo,
+ OwrIntrStatus_BitTransferDoneIntEnable);
+ if (Err != NvSuccess)
+ {
+ return Err;
+ }
+ }
+
+ return NvSuccess;
+}
+
+// Reads NoOfBytes bytes from the 1-wire bus one bit at a time using
+// bit-transfer mode.  For every bit a read time slot is issued via the
+// CONTROL register, the routine waits for bit-transfer-done, then samples
+// READ_SAMPLED_BIT from the STATUS register.  Bits are assembled LSB-first
+// into the caller's buffer, which is zeroed up front.
+// Returns NvSuccess, or the first per-bit failure/timeout.
+NvError
+PrivOwrReadData(
+ NvRmOwrController *pOwrInfo,
+ NvU8* Buffer,
+ NvU32 NoOfBytes)
+{
+ NvU32 ControlReg = 0;
+ NvError status = NvError_Timeout;
+ NvU8* pBuf = Buffer;
+ NvU32 val = 0;
+ NvU32 i =0;
+ NvU32 j =0;
+
+ // Clear the destination so bits can be OR-ed in below.
+ NvOsMemset(pBuf, 0, NoOfBytes);
+
+ // Control word: sample clocks, bit-transfer mode, read time slot.
+ ControlReg =
+ NV_DRF_NUM(OWR, CONTROL, RD_DATA_SAMPLE_CLK, 0x7) |
+ NV_DRF_NUM(OWR, CONTROL, PRESENCE_SAMPLE_CLK, 0x50) |
+ NV_DRF_DEF(OWR, CONTROL, DATA_TRANSFER_MODE, BIT_TRANSFER_MODE) |
+ NV_DRF_DEF(OWR, CONTROL, RD_BIT, TRANSFER_READ_SLOT);
+
+ for (i = 0; i < NoOfBytes; i++)
+ {
+ for (j = 0; j < OWR_NO_OF_BITS_PER_BYTE; j++)
+ {
+ // Issue one read slot and wait for it to complete.
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, CONTROL, ControlReg);
+ status = PrivOwrCheckBitTransferDone(pOwrInfo,
+ OwrIntrStatus_BitTransferDoneIntEnable);
+ if (status != NvSuccess)
+ {
+ return status;
+ }
+ // Sample the received bit and merge it LSB-first.
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, STATUS);
+ val = NV_DRF_VAL(OWR, STATUS, READ_SAMPLED_BIT, val);
+ *pBuf |= (val << j);
+ }
+ pBuf++;
+ }
+ return NvSuccess;
+}
+
+// Byte-transfer-mode read: programs the controller to fetch NumBytes (plus
+// CRC) from the device, waits for the ISR to signal completion, then copies
+// the result out.  For NvRmOwr_ReadAddress the 8-byte ROM id is taken from
+// the READ_ROM0/READ_ROM1 registers; for NvRmOwr_MemRead the RX FIFO is
+// drained word by word (trailing partial word handled separately).
+// On controller error or spurious interrupt the module is reset and
+// NvError_OwrReadFailed is returned.
+static NvError
+PrivOwrReadFifo(
+ NvRmOwrController* pOwrInfo,
+ NvU8* pBuffer,
+ NvRmOwrTransactionInfo Transaction,
+ const NvOdmQueryOwrDeviceInfo* pOdmInfo,
+ NvU32 NumBytes)
+{
+ NvU32 val = 0;
+ NvError status = NvError_OwrReadFailed;
+ NvU32 BytesToRead = 0;
+ NvU32 WordsToRead = 0;
+ NvU32 ReadDataClk = OWR_DEFAULT_READ_DTA_CLK_VALUE;
+ NvU32 PresenceClk = OWR_DEFAULT_PRESENCE_CLK_VALUE;
+ NvU32 i = 0;
+
+ // ODM-supplied sample clocks override the defaults when available.
+ if (pOdmInfo)
+ {
+ ReadDataClk = pOdmInfo->ReadDataSampleClk;
+ PresenceClk = pOdmInfo->PresenceSampleClk;
+ }
+
+ // Configure the number of bytes to read
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, EPROM, (NumBytes - 1));
+
+ // Configure the read, presence sample clock and
+ // configure for byte transfer mode
+ val =
+ NV_DRF_NUM(OWR, CONTROL, RD_DATA_SAMPLE_CLK, ReadDataClk) |
+ NV_DRF_NUM(OWR, CONTROL, PRESENCE_SAMPLE_CLK, PresenceClk) |
+ NV_DRF_DEF(OWR, CONTROL, DATA_TRANSFER_MODE, BYTE_TRANSFER_MODE) |
+ NV_DRF_DEF(OWR, CONTROL, RD_MEM_CRC_REQ, CRC_READ) |
+ NV_DRF_DEF(OWR, CONTROL, GO, START_PRESENCE_PULSE);
+ OWR_REGW(pOwrInfo->pOwrVirtualAddress, CONTROL, val);
+
+ // wait for the read to complete
+ status = NvOsSemaphoreWaitTimeout(pOwrInfo->OwrSyncSemaphore,
+ OWR_TRANSFER_TIMEOUT_MILLI_SEC);
+ if (status == NvSuccess)
+ {
+ if (pOwrInfo->OwrTransferStatus & OWR_BYTE_TRANSFER_ERRORS)
+ {
+ NvRmModuleReset(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+ OWR_PRINT(("RM_OWR Byte mode error interrupt[0x%x]\n",
+ pOwrInfo->OwrTransferStatus));
+ return NvError_OwrReadFailed;
+ }
+ else if (pOwrInfo->OwrTransferStatus & OwrIntrStatus_MemCmdDoneIntEnable)
+ {
+ // Read the data
+ if (Transaction.Flags == NvRmOwr_ReadAddress)
+ {
+ // Read and copy and the ROM ID
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, READ_ROM0);
+ NvOsMemcpy(pBuffer, &val, 4);
+ pBuffer += 4;
+
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, READ_ROM1);
+ NvOsMemcpy(pBuffer, &val, 4);
+ }
+ else if (Transaction.Flags == NvRmOwr_MemRead)
+ {
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, BYTE_CNT);
+ val = NV_DRF_VAL(OWR, BYTE_CNT, RECEIVED, val);
+ /** Decrement the number of bytes to read count as it includes
+ * one byte CRC.
+ */
+ val--;
+
+ // Never copy more than the caller asked for.
+ BytesToRead = (val > NumBytes) ? NumBytes : val;
+ WordsToRead = BytesToRead / OWR_FIFO_WORD_SIZE;
+ for (i = 0; i < WordsToRead; i++)
+ {
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, RX_FIFO);
+ NvOsMemcpy(pBuffer, &val, OWR_FIFO_WORD_SIZE);
+ pBuffer += OWR_FIFO_WORD_SIZE;
+ }
+
+ // Copy any trailing bytes from one last FIFO word.
+ BytesToRead = (BytesToRead % OWR_FIFO_WORD_SIZE);
+ if (BytesToRead)
+ {
+ val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, RX_FIFO);
+ NvOsMemcpy(pBuffer, &val, BytesToRead);
+ }
+ }
+ }
+ else
+ {
+ // Semaphore signalled without a recognized status bit.
+ OWR_PRINT(("RM_OWR Byte mode spurious interrupt[0x%x]\n",
+ pOwrInfo->OwrTransferStatus));
+ NV_ASSERT(!"RM_OWR spurious interrupt\n");
+ NvRmModuleReset(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+ return NvError_OwrReadFailed;
+ }
+ }
+ return status;
+}
+
+/**
+ * PrivOwrWriteFifo - perform one byte-mode OWR (1-wire) write of up to one
+ * FIFO's worth of data.
+ *
+ * Loads NumBytes of pBuffer into the TX FIFO, kicks off a byte-mode transfer
+ * with a presence pulse, and blocks on the controller's sync semaphore until
+ * the ISR signals completion (or OWR_TRANSFER_TIMEOUT_MILLI_SEC elapses).
+ *
+ * @param pOwrInfo    Controller state (register mapping, semaphore, instance).
+ * @param pBuffer     Source data; NumBytes are consumed.
+ * @param Transaction Transfer descriptor. NOTE(review): not referenced in this
+ *                    function's body - kept for signature symmetry with
+ *                    PrivOwrReadFifo, presumably.
+ * @param pOdmInfo    Optional ODM timing info; NULL selects default sample
+ *                    clock values.
+ * @param NumBytes    Number of bytes to write (must fit the FIFO; the caller
+ *                    chunks larger transfers).
+ *
+ * @return NvSuccess on a clean completion, NvError_OwrWriteFailed on an error
+ *         or spurious interrupt, or the semaphore-wait error on timeout.
+ */
+static NvError
+PrivOwrWriteFifo(
+    NvRmOwrController* pOwrInfo,
+    NvU8* pBuffer,
+    NvRmOwrTransactionInfo Transaction,
+    const NvOdmQueryOwrDeviceInfo* pOdmInfo,
+    NvU32 NumBytes)
+{
+    NvU32 val = 0;
+    NvError status = NvError_OwrWriteFailed;
+    NvU32 ReadDataClk = OWR_DEFAULT_READ_DTA_CLK_VALUE;
+    NvU32 PresenceClk = OWR_DEFAULT_PRESENCE_CLK_VALUE;
+    NvU32 i = 0;
+
+    // Prefer ODM-supplied sample clocks over the compile-time defaults
+    if (pOdmInfo)
+    {
+        ReadDataClk = pOdmInfo->ReadDataSampleClk;
+        PresenceClk = pOdmInfo->PresenceSampleClk;
+    }
+
+    // Configure the number of bytes to write (register holds count - 1)
+    OWR_REGW(pOwrInfo->pOwrVirtualAddress, EPROM, (NumBytes - 1));
+
+    // Write data into the FIFO, packing up to 4 bytes little-endian per
+    // 32-bit FIFO word; the switch falls through so a partial tail word
+    // (1-3 bytes) is packed the same way.
+    for (i = NumBytes; i > 0; )
+    {
+        NvU32 BytesToWrite = NV_MIN(sizeof(NvU32),i);
+        val = 0;
+        switch (BytesToWrite)
+        {
+            case 4: val |= pBuffer[3]; i--; // fallthrough
+            case 3: val <<=8; val |= pBuffer[2]; i--; // fallthrough
+            case 2: val <<=8; val |= pBuffer[1]; i--; // fallthrough
+            case 1: val <<=8; val |= pBuffer[0]; i--;
+                OWR_REGW(pOwrInfo->pOwrVirtualAddress, TX_FIFO, val);
+                pBuffer += BytesToWrite;
+                break;
+        }
+    }
+
+    // Configure the read, presence sample clock and
+    // configure for byte transfer mode; GO starts the presence pulse
+    val =
+        NV_DRF_NUM(OWR, CONTROL, RD_DATA_SAMPLE_CLK, ReadDataClk) |
+        NV_DRF_NUM(OWR, CONTROL, PRESENCE_SAMPLE_CLK, PresenceClk) |
+        NV_DRF_DEF(OWR, CONTROL, DATA_TRANSFER_MODE, BYTE_TRANSFER_MODE) |
+        NV_DRF_DEF(OWR, CONTROL, GO, START_PRESENCE_PULSE);
+    OWR_REGW(pOwrInfo->pOwrVirtualAddress, CONTROL, val);
+
+    // wait for the write to complete; the ISR stores the interrupt status
+    // in OwrTransferStatus before signalling
+    status = NvOsSemaphoreWaitTimeout(pOwrInfo->OwrSyncSemaphore,
+        OWR_TRANSFER_TIMEOUT_MILLI_SEC);
+    if (status == NvSuccess)
+    {
+        if (pOwrInfo->OwrTransferStatus & OWR_BYTE_TRANSFER_ERRORS)
+        {
+            // Hardware flagged a transfer error: reset the module so the
+            // next transaction starts from a clean state
+            NvRmModuleReset(pOwrInfo->hRmDevice,
+                NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+            OWR_PRINT(("RM_OWR Byte mode error interrupt[0x%x]\n",
+                pOwrInfo->OwrTransferStatus));
+            return NvError_OwrWriteFailed;
+        }
+        else if (pOwrInfo->OwrTransferStatus & OwrIntrStatus_MemCmdDoneIntEnable)
+        {
+            val = OWR_REGR(pOwrInfo->pOwrVirtualAddress, BYTE_CNT);
+            val = NV_DRF_VAL(OWR, BYTE_CNT, RECEIVED, val);
+
+            /** byte count includes ROM, Mem command size and Memory
+             * address size. So, subtract ROM, Mem Command size and
+             * memory address size from byte count.
+             * NOTE(review): OWR_MEM_CMD_SIZE_BYTES is subtracted twice -
+             * presumably the ROM command is the same size as the memory
+             * command; confirm the first subtraction shouldn't use a
+             * dedicated ROM-command-size constant.
+             */
+            val -= OWR_MEM_CMD_SIZE_BYTES;
+            val -= OWR_MEM_CMD_SIZE_BYTES;
+            val -= OWR_DEFAULT_OFFSET_SIZE_BYTES;
+
+            /** Assert if the actual bytes written is
+             * not equal to the bytes written
+             */
+            NV_ASSERT(val == NumBytes);
+        }
+        else
+        {
+            // Woken without error or done bits set: treat as spurious,
+            // reset the module and fail the write
+            OWR_PRINT(("RM_OWR Byte mode spurious interrupt[0x%x]\n",
+                pOwrInfo->OwrTransferStatus));
+            NV_ASSERT(!"RM_OWR spurious interrupt\n");
+            NvRmModuleReset(pOwrInfo->hRmDevice,
+                NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance));
+            return NvError_OwrWriteFailed;
+        }
+    }
+    return status;
+}
+
+/****************************************************************************/
+
+/**
+ * OwrIsr - interrupt service routine for the OWR controller.
+ *
+ * Captures the pending interrupt status into the controller state for the
+ * thread blocked in a transfer routine, clears the status register, and
+ * wakes that thread via the sync semaphore.
+ */
+static void OwrIsr(void* args)
+{
+    NvRmOwrController* pController = (NvRmOwrController*)args;
+    NvU32 Status;
+
+    /* Latch the pending interrupt sources for the waiting transfer thread */
+    Status = OWR_REGR(pController->pOwrVirtualAddress, INTR_STATUS);
+    pController->OwrTransferStatus = Status;
+
+    /* Clear the captured status bits in the controller */
+    OWR_REGW(pController->pOwrVirtualAddress, INTR_STATUS, Status);
+
+    /* Wake the waiter, then tell RM this interrupt has been serviced */
+    NvOsSemaphoreSignal(pController->OwrSyncSemaphore);
+    NvRmInterruptDone(pController->OwrInterruptHandle);
+}
+
+/**
+ * AP20RmOwrClose - release all resources held by an opened OWR controller.
+ *
+ * Undoes AP20RmOwrOpen: unregisters the interrupt (unless built in polling
+ * mode), destroys the sync semaphore, clears the function table entries and
+ * unmaps the controller's register aperture.
+ */
+static void AP20RmOwrClose(NvRmOwrController *pOwrInfo)
+{
+    /* The semaphore doubles as the "was opened successfully" flag */
+    if (pOwrInfo->OwrSyncSemaphore != NULL)
+    {
+#if !OWR_BIT_TRANSFER_POLLING_MODE
+        NvRmInterruptUnregister(pOwrInfo->hRmDevice,
+            pOwrInfo->OwrInterruptHandle);
+#endif
+        NvOsSemaphoreDestroy(pOwrInfo->OwrSyncSemaphore);
+        pOwrInfo->OwrInterruptHandle = NULL;
+        pOwrInfo->OwrSyncSemaphore = NULL;
+    }
+
+    /* Drop the dispatch entries and release the register mapping */
+    pOwrInfo->close = 0;
+    pOwrInfo->write = 0;
+    pOwrInfo->read = 0;
+    NvRmPhysicalMemUnmap(pOwrInfo->pOwrVirtualAddress, pOwrInfo->OwrBankSize);
+}
+
+/**
+ * AP20RmOwrRead - execute an OWR (1-wire) read transaction.
+ *
+ * Programs the controller timing registers (from ODM query data when
+ * available, hard-coded defaults otherwise), then performs either a
+ * bit-mode transfer (presence check + bit-banged commands) or a byte-mode
+ * transfer (hardware command engine + RX FIFO), depending on whether the
+ * ODM reports byte-mode support.
+ *
+ * @param pOwrInfo    Controller state (registers, semaphore, instance).
+ * @param pBuffer     Destination buffer for the bytes read.
+ * @param Transaction Transfer descriptor: Flags selects ROM-ID read vs
+ *                    memory read, Offset is the device memory address,
+ *                    NumBytes the payload length.
+ *
+ * @return NvSuccess on success, NvError_BadParameter for a zero-length
+ *         memory read, otherwise a transfer/timeout error code.
+ */
+static NvError
+AP20RmOwrRead(
+    NvRmOwrController* pOwrInfo,
+    NvU8* pBuffer,
+    NvRmOwrTransactionInfo Transaction)
+{
+    NvU32 val = 0;
+    NvError status = NvError_Timeout;
+    NvBool IsByteModeSupported = NV_FALSE;
+    const NvOdmQueryOwrDeviceInfo* pOwrOdmInfo = NULL;
+    NvU32 ReadDataClk = OWR_DEFAULT_READ_DTA_CLK_VALUE;
+    NvU32 PresenceClk = OWR_DEFAULT_PRESENCE_CLK_VALUE;
+    NvU32 DeviceOffsetSize = OWR_DEFAULT_OFFSET_SIZE_BYTES;
+    NvU32 TotalBytesToRead = 0;
+    NvU32 BytesRead = 0;
+    NvU32 FifoSize = 0;
+    NvU32 i = 0;
+    NvU8* pReadPtr = pBuffer;
+
+    // A memory read of zero bytes is meaningless
+    if ((Transaction.Flags == NvRmOwr_MemRead) && (!Transaction.NumBytes))
+    {
+        return NvError_BadParameter;
+    }
+
+    pOwrOdmInfo = NvOdmQueryGetOwrDeviceInfo(pOwrInfo->Instance);
+    if (!pOwrOdmInfo)
+    {
+        IsByteModeSupported = NV_FALSE;
+
+        // program the default timing registers
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, WR_RD_TCTL, 0x13fde0f7);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, RST_PRESENCE_TCTL, 0x787bbfdf);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, PROG_PULSE_TCTL, 0x01e05555);
+    }
+    else
+    {
+        IsByteModeSupported = pOwrOdmInfo->IsByteModeSupported;
+        ReadDataClk = pOwrOdmInfo->ReadDataSampleClk;
+        PresenceClk = pOwrOdmInfo->PresenceSampleClk;
+        DeviceOffsetSize = pOwrOdmInfo->AddressSize;
+
+        // program the ODM-supplied timing registers
+        val =
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TSLOT, pOwrOdmInfo->TSlot) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TLOW1, pOwrOdmInfo->TLow1) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TLOW0, pOwrOdmInfo->TLow0) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TRDV, pOwrOdmInfo->TRdv) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TRELEASE, pOwrOdmInfo->TRelease) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TSU, pOwrOdmInfo->Tsu);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, WR_RD_TCTL, val);
+
+        val =
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TRSTH, pOwrOdmInfo->TRsth) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TRSTL, pOwrOdmInfo->TRstl) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TPDH, pOwrOdmInfo->Tpdh) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TPDL, pOwrOdmInfo->Tpdl);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, RST_PRESENCE_TCTL, val);
+
+        val =
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TPD, pOwrOdmInfo->Tpd) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TDV, pOwrOdmInfo->Tdv) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TRP, pOwrOdmInfo->Trp) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TFP, pOwrOdmInfo->Tfp) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TPP, pOwrOdmInfo->Tpp);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, PROG_PULSE_TCTL, val);
+    }
+
+    if (!IsByteModeSupported)
+    {
+        // Bit Transfer Mode
+
+        // Enable the presence done interrupt
+        PrivOwrEnableInterrupts(pOwrInfo, OwrIntrStatus_PresenceDoneIntEnable);
+        pOwrInfo->OwrTransferStatus = 0;
+
+        // Configure for presence
+        val =
+            NV_DRF_NUM(OWR, CONTROL, RD_DATA_SAMPLE_CLK, ReadDataClk) |
+            NV_DRF_NUM(OWR, CONTROL, PRESENCE_SAMPLE_CLK, PresenceClk) |
+            NV_DRF_DEF(OWR, CONTROL, DATA_TRANSFER_MODE, BIT_TRANSFER_MODE) |
+            NV_DRF_DEF(OWR, CONTROL, GO, START_PRESENCE_PULSE);
+
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, CONTROL, val);
+
+        // Check for presence
+        status = PrivOwrCheckBitTransferDone(pOwrInfo,
+                    OwrIntrStatus_PresenceDoneIntEnable);
+        if (status != NvSuccess)
+        {
+            return status;
+        }
+
+        // Enable the bit transfer done interrupt
+        PrivOwrEnableInterrupts(pOwrInfo,
+            OwrIntrStatus_BitTransferDoneIntEnable);
+
+
+        if (Transaction.Flags == NvRmOwr_ReadAddress)
+        {
+            // Send the ROM Read Command
+            NV_ASSERT_SUCCESS(PrivOwrSendCommand(pOwrInfo,
+                OWR_ROM_READ_COMMAND));
+
+            // Read the ROM ID
+            status = PrivOwrReadData(pOwrInfo, pReadPtr, OWR_ROM_ID_SIZE_BYTES);
+        }
+        else if (Transaction.Flags == NvRmOwr_MemRead)
+        {
+            // Skip the ROM Read Command
+            NV_ASSERT_SUCCESS(
+                PrivOwrSendCommand(pOwrInfo, OWR_ROM_SKIP_COMMAND));
+
+            // Send the Mem Read Command
+            NV_ASSERT_SUCCESS(
+                PrivOwrSendCommand(pOwrInfo, OWR_MEM_READ_COMMAND));
+
+            // Send the target memory offset one byte at a time, least
+            // significant byte first. DeviceOffsetSize is a byte count, so
+            // shift by (i * 8) bits to isolate byte i of the offset.
+            // (Previously this shifted by i *bits*, which sent overlapping
+            // fragments of the low byte for any multi-byte address.)
+            for (i = 0; i < DeviceOffsetSize; i++)
+            {
+                val = (Transaction.Offset >> (i * 8)) & 0xFF;
+                NV_ASSERT_SUCCESS(PrivOwrSendCommand(pOwrInfo, val));
+            }
+
+            // Read the CRC
+            NV_ASSERT_SUCCESS(
+                PrivOwrReadData(pOwrInfo, pReadPtr, OWR_CRC_SIZE_BYTES));
+
+            // TODO: Need to compute the CRC and compare with the CRC read
+
+            // Read Mem data
+            status = PrivOwrReadData(pOwrInfo, pReadPtr, Transaction.NumBytes);
+        }
+
+        return status;
+    }
+    else
+    {
+        // Byte transfer Mode
+
+        // Enable the interrupts
+        PrivOwrEnableInterrupts(pOwrInfo,
+            (OwrIntrStatus_PresenceErrIntEnable |
+            OwrIntrStatus_CrcErrIntEnable |
+            OwrIntrStatus_MemWriteErrIntEnable |
+            OwrIntrStatus_ErrCommandIntEnable |
+            OwrIntrStatus_MemCmdDoneIntEnable|
+            OwrIntrStatus_TxfOvfIntEnable |
+            OwrIntrStatus_RxfUnrIntEnable));
+
+        // Configure the Rom command and the eeprom starting address
+        val = (
+            NV_DRF_NUM(OWR, COMMAND, ROM_CMD, OWR_ROM_READ_COMMAND) |
+            NV_DRF_NUM(OWR, COMMAND, MEM_CMD, OWR_MEM_READ_COMMAND) |
+            NV_DRF_NUM(OWR, COMMAND, MEM_ADDR, Transaction.Offset));
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, COMMAND, val);
+
+        /** We can't program a ROM ID read alone; a memory read must be
+         * issued along with the ROM ID read. So program a memory read of
+         * one byte even for a plain ROM ID read.
+         */
+        TotalBytesToRead = (Transaction.NumBytes) ? Transaction.NumBytes : 1;
+        FifoSize = (OWR_FIFO_DEPTH * OWR_FIFO_WORD_SIZE);
+        // Chunk the transfer so each PrivOwrReadFifo call fits the RX FIFO
+        while(TotalBytesToRead)
+        {
+            BytesRead =
+                (TotalBytesToRead > FifoSize) ? FifoSize : TotalBytesToRead;
+            pOwrInfo->OwrTransferStatus = 0;
+            status =
+                PrivOwrReadFifo(pOwrInfo, pReadPtr, Transaction,
+                    pOwrOdmInfo, BytesRead);
+            if (status != NvSuccess)
+            {
+                break;
+            }
+            TotalBytesToRead -= BytesRead;
+            pReadPtr += BytesRead;
+        }
+
+    }
+    return status;
+}
+
+/**
+ * AP20RmOwrWrite - execute an OWR (1-wire) write transaction.
+ *
+ * Programs the controller timing registers (from ODM query data when
+ * available, hard-coded defaults otherwise) and performs a byte-mode
+ * write, chunked to the hardware FIFO size. Bit-mode writes are not
+ * implemented; byte-mode support is asserted.
+ *
+ * @param pOwrInfo    Controller state (registers, semaphore, instance).
+ * @param pBuffer     Source buffer for the bytes to write.
+ * @param Transaction Transfer descriptor: Offset is the device memory
+ *                    address, NumBytes the payload length.
+ *
+ * @return NvSuccess on success, NvError_BadParameter for a zero-length
+ *         memory write, otherwise a transfer/timeout error code.
+ */
+static NvError
+AP20RmOwrWrite(
+    NvRmOwrController *pOwrInfo,
+    NvU8* pBuffer,
+    NvRmOwrTransactionInfo Transaction)
+{
+    NvU32 val = 0;
+    NvError status = NvError_Timeout;
+    NvBool IsByteModeSupported = NV_FALSE;
+    const NvOdmQueryOwrDeviceInfo* pOwrOdmInfo = NULL;
+    NvU32 TotalBytesToWrite = 0;
+    NvU32 BytesWritten = 0;
+    NvU32 FifoSize = 0;
+    NvU8* pWritePtr = pBuffer;
+
+    // A memory write of zero bytes is meaningless
+    if ((Transaction.Flags == NvRmOwr_MemWrite) && (!Transaction.NumBytes))
+    {
+        return NvError_BadParameter;
+    }
+
+    pOwrOdmInfo = NvOdmQueryGetOwrDeviceInfo(pOwrInfo->Instance);
+    if (!pOwrOdmInfo)
+    {
+        IsByteModeSupported = NV_FALSE;
+
+        // program the default timing registers
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, WR_RD_TCTL, 0x13fde0f7);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, RST_PRESENCE_TCTL, 0x787bbfdf);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, PROG_PULSE_TCTL, 0x01e05555);
+    }
+    else
+    {
+        IsByteModeSupported = pOwrOdmInfo->IsByteModeSupported;
+
+        // program the ODM-supplied timing registers
+        val =
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TSLOT, pOwrOdmInfo->TSlot) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TLOW1, pOwrOdmInfo->TLow1) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TLOW0, pOwrOdmInfo->TLow0) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TRDV, pOwrOdmInfo->TRdv) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TRELEASE, pOwrOdmInfo->TRelease) |
+            NV_DRF_NUM(OWR, WR_RD_TCTL, TSU, pOwrOdmInfo->Tsu);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, WR_RD_TCTL, val);
+
+        val =
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TRSTH, pOwrOdmInfo->TRsth) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TRSTL, pOwrOdmInfo->TRstl) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TPDH, pOwrOdmInfo->Tpdh) |
+            NV_DRF_NUM(OWR, RST_PRESENCE_TCTL, TPDL, pOwrOdmInfo->Tpdl);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, RST_PRESENCE_TCTL, val);
+
+        val =
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TPD, pOwrOdmInfo->Tpd) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TDV, pOwrOdmInfo->Tdv) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TRP, pOwrOdmInfo->Trp) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TFP, pOwrOdmInfo->Tfp) |
+            NV_DRF_NUM(OWR, PROG_PULSE_TCTL, TPP, pOwrOdmInfo->Tpp);
+        OWR_REGW(pOwrInfo->pOwrVirtualAddress, PROG_PULSE_TCTL, val);
+    }
+
+    // Only Byte transfer Mode is supported for writes
+    NV_ASSERT(IsByteModeSupported == NV_TRUE);
+
+    // Enable the interrupts
+    PrivOwrEnableInterrupts(pOwrInfo,
+        (OwrIntrStatus_PresenceErrIntEnable |
+        OwrIntrStatus_CrcErrIntEnable |
+        OwrIntrStatus_MemWriteErrIntEnable |
+        OwrIntrStatus_ErrCommandIntEnable |
+        OwrIntrStatus_MemCmdDoneIntEnable|
+        OwrIntrStatus_TxfOvfIntEnable |
+        OwrIntrStatus_RxfUnrIntEnable));
+
+    // Configure the Rom command and the eeprom starting address
+    val = (
+        NV_DRF_NUM(OWR, COMMAND, ROM_CMD, OWR_ROM_READ_COMMAND) |
+        NV_DRF_NUM(OWR, COMMAND, MEM_CMD, OWR_MEM_WRITE_COMMAND) |
+        NV_DRF_NUM(OWR, COMMAND, MEM_ADDR, Transaction.Offset));
+    OWR_REGW(pOwrInfo->pOwrVirtualAddress, COMMAND, val);
+
+    // Chunk the transfer so each PrivOwrWriteFifo call fits the TX FIFO
+    TotalBytesToWrite = Transaction.NumBytes;
+    FifoSize = (OWR_FIFO_DEPTH * OWR_FIFO_WORD_SIZE);
+    while(TotalBytesToWrite)
+    {
+        BytesWritten =
+            (TotalBytesToWrite > FifoSize) ? FifoSize : TotalBytesToWrite;
+        pOwrInfo->OwrTransferStatus = 0;
+        status =
+            PrivOwrWriteFifo(pOwrInfo, pWritePtr, Transaction,
+                pOwrOdmInfo, BytesWritten);
+        if (status != NvSuccess)
+        {
+            break;
+        }
+        TotalBytesToWrite -= BytesWritten;
+        pWritePtr += BytesWritten;
+    }
+    return status;
+}
+
+/**
+ * AP20RmOwrOpen - initialize an OWR controller instance.
+ *
+ * Fills in the controller's read/write/close dispatch entries, maps the
+ * controller register aperture, creates the transfer sync semaphore and
+ * (unless built in polling mode) registers the interrupt handler.
+ *
+ * @param pOwrInfo Controller state; must be non-NULL.
+ *
+ * @return NvSuccess on success, otherwise the semaphore-creation or
+ *         interrupt-registration error.
+ */
+NvError AP20RmOwrOpen(NvRmOwrController *pOwrInfo)
+{
+    NvError status = NvSuccess;
+
+    NV_ASSERT(pOwrInfo != NULL);
+
+    /* Populate the dispatch structure */
+    pOwrInfo->read = AP20RmOwrRead;
+    pOwrInfo->write = AP20RmOwrWrite;
+    pOwrInfo->close = AP20RmOwrClose;
+
+    NvRmModuleGetBaseAddress(
+        pOwrInfo->hRmDevice,
+        NVRM_MODULE_ID(NvRmModuleID_OneWire, pOwrInfo->Instance),
+        &pOwrInfo->OwrPhysicalAddress,
+        &pOwrInfo->OwrBankSize);
+
+    NV_ASSERT_SUCCESS(NvRmPhysicalMemMap(
+        pOwrInfo->OwrPhysicalAddress,
+        pOwrInfo->OwrBankSize, NVOS_MEM_READ_WRITE,
+        NvOsMemAttribute_Uncached,
+        (void **)&pOwrInfo->pOwrVirtualAddress));
+
+    // Create the sync semaphore (initial count 0: first wait blocks)
+    status = NvOsSemaphoreCreate( &pOwrInfo->OwrSyncSemaphore, 0);
+
+    if (pOwrInfo->OwrSyncSemaphore)
+    {
+        NvU32 IrqList;
+        NvOsInterruptHandler IntHandlers;
+
+        IntHandlers = OwrIsr;
+        IrqList = NvRmGetIrqForLogicalInterrupt(
+            pOwrInfo->hRmDevice,
+            NVRM_MODULE_ID(pOwrInfo->ModuleId, pOwrInfo->Instance), 0);
+
+        // NOTE(review): in polling mode the registration is compiled out,
+        // so the failure check below only sees the (successful) semaphore
+        // creation status - presumably intentional.
+#if !OWR_BIT_TRANSFER_POLLING_MODE
+        status = NvRmInterruptRegister(pOwrInfo->hRmDevice, 1, &IrqList,
+                &IntHandlers, pOwrInfo,
+                &pOwrInfo->OwrInterruptHandle, NV_TRUE);
+#endif
+
+        if (status != NvSuccess)
+        {
+            NV_ASSERT(!"OWR module interrupt register failed!");
+            NvOsSemaphoreDestroy(pOwrInfo->OwrSyncSemaphore);
+            pOwrInfo->OwrSyncSemaphore = 0;
+        }
+    }
+
+    return status;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie.c b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie.c
new file mode 100644
index 000000000000..47e531386afa
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie.c
@@ -0,0 +1,2122 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvos.h"
+#include "nvrm_pcie.h"
+#include "nvrm_module_private.h"
+#include "ap20rm_pcie_private.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_hwintf.h"
+#include "nvassert.h"
+#include "nvrm_clocks.h"
+#include "nvrm_interrupt.h"
+#include "nvrm_processor.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_memctrl.h"
+#include "nvodm_pmu.h"
+#include "nvodm_query_discovery.h"
+#include "nvrm_pinmux.h"
+
+/* PCIE driver state.
+ * FIXME move to a structure if needed.
+ * */
+/* static variables holding the configuration space of the 2 root port bridges */
+static NvU32 s_pciConfig0[NV_PROJ__PCIE2_RP_ERPTCAP/4]; // 256 bytes
+static NvU32 s_pciConfig1[NV_PROJ__PCIE2_RP_ERPTCAP/4]; // 256 bytes
+static NvBool s_PcieRootPortPresent = NV_FALSE;
+static NvBool s_PcieRootPort0Present = NV_FALSE;
+static NvBool s_PcieRootPort1Present = NV_FALSE;
+
+static NvU8 *s_pcieBase = NULL;
+static NvRmMemHandle s_pcieMsiMemoryHandle = NULL;
+static NvU32 s_pcieSize = 0;
+static NvU32 s_pcieMsiVectorCount = 0;
+static NvRmPhysAddr s_pciePhysical = 0;
+static NvRmPciDevice pciDevices[MAX_PCI_DEVICES];
+static NvRmPciDevice *s_rp0 = NULL;
+static NvU32 s_NumPciDevices = 0;
+static NvOsInterruptHandle s_pcieInterruptHandle = NULL;
+static NvOsInterruptHandle s_pcieMSIHandle = NULL;
+static NvU32 s_PowerClientId;
+#define ROOTPORT_0_BUS 0x00
+#define ROOTPORT_0_SUBBUS 0x1f
+#define ROOTPORT_1_BUS (ROOTPORT_0_SUBBUS + 1)
+#define ROOTPORT_1_SUBBUS (ROOTPORT_1_BUS + ROOTPORT_0_SUBBUS - ROOTPORT_0_BUS) //same size as RP0
+
+//#define SINGLE_PORT
+
+//return the matching "device" entry.
+//assumes they've been loaded already
+static NvRmPciDevice *
+pcie_GetDevice(NvU32 function_device_bus);
+
+static void
+pcie_ConfigureDeviceBAR(NvRmDeviceHandle rm, NvRmPciDevice *dev);
+
+static
+NvRmPciDevice *pcie_allocDevice(NvRmDeviceHandle rm);
+
+static void
+pcie_ConfigureMSI(NvRmDeviceHandle rm, NvRmPciDevice *device);
+
+static
+void pcie_scanbus(NvRmDeviceHandle rm, NvRmPciDevice *dev_parent);
+
+//interrupt handler for MSI interrupts
+void NvRmPrivHandlePcieMSI(void *arg);
+
+static
+void pcie_businit(NvRmDeviceHandle rm);
+
+static
+void pcie_businitx(NvRmDeviceHandle rm, int rpindex);
+
+static
+void pcie_buswalk(NvRmDeviceHandle rm, NvRmPciDevice *dev);
+
+static
+void pcie_readmultiple(NvU8 *dest, NvU8 *src, NvU32 len);
+
+static
+void NvRmPrivHandlePcieInterrupt(void *arg);
+
+static
+void pcie_ReadRPConfig(NvRmDeviceHandle hRm, NvU8 *data, NvU32 len, NvU32 offset, NvU32 controller_number);
+
+static
+void pcie_WriteRPConfig(NvRmDeviceHandle hRm, NvU8 *data, NvU32 len, NvU32 offset, NvU32 controller_number);
+typedef NvError (*NvRmMSIHandler)(NvU8, void *);
+
+#define MAX_MSI_HANDLERS 256
+#define MSI_MEMORY_ALIGNMENT 16
+#define MAX_LEGACY_HANDLERS 16
+
+struct {
+ NvOsSemaphoreHandle sem;
+} static MSIHandlers[MAX_MSI_HANDLERS];
+
+struct {
+ NvOsSemaphoreHandle sem;
+} static LegacyHandlers[MAX_LEGACY_HANDLERS];
+
+/**
+ * pcie_readmultiple - copy len bytes from a memory-mapped source into a
+ * plain buffer using accesses the bus can tolerate.
+ *
+ * Reads bytes until src is 32-bit aligned, then reads whole words
+ * (storing them word-wise when dest is also aligned, byte-wise
+ * otherwise), and finishes with byte reads for any tail.
+ */
+static void pcie_readmultiple(NvU8 *dest, NvU8 *src, NvU32 len)
+{
+    NvU32 temp;
+
+    // Leading bytes: advance until src reaches 32-bit alignment
+    while (((NvU32)src & 0x3) && len)
+    {
+        *dest++ = NV_READ8(src);
+        src++;
+        len--;
+    }
+
+    // Bulk: one aligned 32-bit read per word
+    while (len/4)
+    {
+        NvU8 *tempPtr = (NvU8 *)&temp;
+
+        temp = NV_READ32(src);
+        /* Dest aligned? Store the whole word; otherwise spill it a byte
+         * at a time to avoid a misaligned store. */
+        if (!((NvU32)dest & 0x3))
+        {
+            *(NvU32 *)dest = temp;
+            dest += 4;
+        } else
+        {
+            *dest++ = *tempPtr++;
+            *dest++ = *tempPtr++;
+            *dest++ = *tempPtr++;
+            *dest++ = *tempPtr++;
+        }
+        src += 4;
+        len -= 4;
+    }
+
+    // Trailing bytes (len % 4)
+    while (len)
+    {
+        *dest++ = NV_READ8(src);
+        src++;
+        len--;
+    }
+}
+
+static void pcie_writemultiple(NvU8 *dest, NvU8 *src, NvU32 len);
+
+/**
+ * pcie_writemultiple - copy len bytes from a plain buffer into a
+ * memory-mapped destination using accesses the bus can tolerate.
+ *
+ * Writes bytes until dest is 32-bit aligned, then writes whole words
+ * (gathering them word-wise when src is also aligned, byte-wise
+ * otherwise), and finishes with byte writes for any tail.
+ */
+static void pcie_writemultiple(NvU8 *dest, NvU8 *src, NvU32 len)
+{
+    NvU32 temp;
+
+    // Leading bytes: advance until dest reaches 32-bit alignment
+    while (((NvU32)dest & 0x3) && len)
+    {
+        NV_WRITE08(dest, *src);
+        src++;
+        dest++;
+        len--;
+    }
+
+    // Bulk: one aligned 32-bit write per word
+    while (len/4)
+    {
+        NvU8 *tempPtr = (NvU8 *)&temp;
+        /* Source aligned? Load the whole word; otherwise gather it a
+         * byte at a time to avoid a misaligned load. */
+        if (!((NvU32)src & 0x3))
+        {
+            temp = *(NvU32 *)src;
+            src += 4;
+        } else
+        {
+            *tempPtr++ = *src++;
+            *tempPtr++ = *src++;
+            *tempPtr++ = *src++;
+            *tempPtr++ = *src++;
+        }
+        NV_WRITE32(dest, temp);
+        dest += 4;
+        len -= 4;
+    }
+
+    // Trailing bytes (len % 4)
+    while (len)
+    {
+        NV_WRITE08(dest, *src);
+        dest++;
+        src++;
+        len--;
+    }
+}
+
+
+
+static void
+pcie_regw(NvRmDeviceHandle hRm, PcieRegType type, NvU32 offset, NvU32 data);
+
+/**
+ * pcie_regw - write a 32-bit value to a PCIE register.
+ *
+ * Translates (region type, register offset) into an offset within the
+ * mapped PCIE aperture and performs the write. Asserts (and does
+ * nothing) for an unknown region type.
+ */
+static void
+pcie_regw(NvRmDeviceHandle hRm, PcieRegType type, NvU32 offset, NvU32 data)
+{
+    NvU32 RegionBase;
+
+    /* Resolve the per-region base offset within the aperture */
+    if (type == PcieRegType_AFI)
+    {
+        RegionBase = NV_PCIE_AXI_AFI_REGS_OFSET;
+    }
+    else if (type == PcieRegType_CFG0)
+    {
+        RegionBase = NV_PCIE_AXI_RP_T0C0_OFFSET;
+    }
+    else if (type == PcieRegType_CFG1)
+    {
+        RegionBase = NV_PCIE_AXI_RP_T0C1_OFFSET;
+    }
+    else if (type == PcieRegType_PADS)
+    {
+        RegionBase = NV_PCIE_AXI_PADS_OFSET;
+    }
+    else
+    {
+        /* Covers PcieRegType_Force32 and anything else invalid */
+        NV_ASSERT(0);
+        return;
+    }
+
+    NV_ASSERT(s_pcieBase != NULL);
+    NV_WRITE32(s_pcieBase + RegionBase + offset, data);
+    return;
+}
+
+static NvU32
+pcie_regr(NvRmDeviceHandle hRm, PcieRegType type, NvU32 offset);
+
+/**
+ * pcie_regr - read a 32-bit value from a PCIE register.
+ *
+ * Translates (region type, register offset) into an offset within the
+ * mapped PCIE aperture and performs the read. Asserts and returns 0 for
+ * an unknown region type.
+ */
+static NvU32 pcie_regr(NvRmDeviceHandle hRm, PcieRegType type, NvU32 offset)
+{
+    // Resolve the per-region base offset within the aperture
+    switch (type)
+    {
+        case PcieRegType_AFI:
+            offset += NV_PCIE_AXI_AFI_REGS_OFSET;
+            break;
+        case PcieRegType_CFG0:
+            offset += NV_PCIE_AXI_RP_T0C0_OFFSET;
+            break;
+        case PcieRegType_CFG1:
+            offset += NV_PCIE_AXI_RP_T0C1_OFFSET;
+            break;
+        case PcieRegType_PADS:
+            offset += NV_PCIE_AXI_PADS_OFSET;
+            break;
+        case PcieRegType_Force32:
+        default:
+            NV_ASSERT(0);
+            return 0;
+    }
+    NV_ASSERT(s_pcieBase != NULL);
+    return NV_READ32(s_pcieBase + offset);
+}
+
+/**
+ * NvRmReadWriteConfigSpace - read or write PCI configuration space for a
+ * device identified by a packed bus/device/function value.
+ *
+ * Accesses behind a root port go through the memory-mapped config
+ * aperture (basic or extended, by offset). Accesses targeting a root
+ * port itself are redirected to the RP's local config registers.
+ *
+ * @param hDeviceHandle       RM device handle.
+ * @param function_device_bus Packed ID: bits 0-7 bus, 8-12 device,
+ *                            16-18 function (per the decode below).
+ * @param type                NvRmPcieAccessType_Read or _Write.
+ * @param offset              Byte offset within config space.
+ * @param Data                Caller buffer (source or destination).
+ * @param DataLen             Number of bytes to transfer.
+ *
+ * @return NvSuccess, NvError_BadParameter for invalid arguments or an
+ *         out-of-range bus, NvError_DeviceNotFound when no root port
+ *         came up.
+ */
+NvError NvRmReadWriteConfigSpace(
+    NvRmDeviceHandle hDeviceHandle,
+    NvU32 function_device_bus,
+    NvRmPcieAccessType type,
+    NvU32 offset,
+    NvU8 *Data,
+    NvU32 DataLen )
+{
+    NvBool extendedConfig;
+    NvU32 addr;
+    NvU32 bus;
+    NvU32 device;
+    NvU32 function;
+
+    // Unpack the bus/device/function fields
+    bus = function_device_bus & 0xff;
+    device = (function_device_bus >> 8) & 0x1f;
+    function = (function_device_bus >> 16) & 0x7;
+
+    if (DataLen == 0)
+        return NvSuccess;
+
+    if (Data == NULL)
+        return NvError_BadParameter;
+
+    /* No device attached, so bailout */
+    if (!s_PcieRootPortPresent)
+        return NvError_DeviceNotFound;
+
+    /* Bus number must stay within the supported range */
+    NV_ASSERT((bus) < NVRM_PCIE_MAX_DEVICES);
+    //if someone tries to access a bus outside of the range, they get an abort-crash
+    //handle that more gracefully
+    if(! ( (s_PcieRootPort0Present && (bus>ROOTPORT_0_BUS) && (bus <=ROOTPORT_0_SUBBUS))
+        || (s_PcieRootPort1Present && (bus>ROOTPORT_1_BUS) && (bus <=ROOTPORT_1_SUBBUS)) ))
+    {
+        //handle accessing the root ports more gracefully:
+        //redirect RP config accesses to the RP's own registers
+        if((bus==ROOTPORT_0_BUS) && s_PcieRootPort0Present && (device==0) && (function==0))
+        {
+            if(type==NvRmPcieAccessType_Read)
+                pcie_ReadRPConfig(hDeviceHandle, Data, DataLen, offset, 0);
+            else
+                pcie_WriteRPConfig(hDeviceHandle, Data, DataLen, offset, 0);
+
+            return NvSuccess;
+        }
+        if((bus==ROOTPORT_1_BUS) && s_PcieRootPort1Present && (device==0) && (function==0))
+        {
+            if(type==NvRmPcieAccessType_Read)
+                pcie_ReadRPConfig(hDeviceHandle, Data, DataLen, offset, 1);
+            else
+                pcie_WriteRPConfig(hDeviceHandle, Data, DataLen, offset, 1);
+            return NvSuccess;
+        }
+
+        return NvError_BadParameter;
+    }
+
+    // Classify the access: first 256 bytes are basic config, the rest up
+    // to 4KB is extended config.
+    // NOTE(review): an access ending exactly at byte 256 (e.g. offset 252,
+    // len 4) fails the "< 256" test and is classified as extended even
+    // though it lies wholly in basic config - confirm whether "<= 256"
+    // was intended.
+    if (offset + DataLen < 256)
+    {
+        extendedConfig = NV_FALSE;
+    } else if (offset + DataLen < 4096 )
+    {
+        extendedConfig = NV_TRUE;
+    } else
+    {
+        NV_ASSERT(!"Illegal config access\n");
+        return NvError_BadParameter;
+    }
+
+    /* Cannot straddle between basic config and extended config */
+    if (offset < 256 && offset + DataLen > 256)
+    {
+        return NvError_BadParameter;
+    }
+
+    // Pick the aperture matching the config-space region
+    addr = (NvU32)s_pcieBase;
+    if (extendedConfig)
+    {
+        addr += NVRM_PCIE_EXTENDED_CONFIG_OFFSET;
+    } else
+    {
+        addr += NVRM_PCIE_CONFIG_OFFSET;
+    }
+
+    /* 16:24 bits are interpreted as bus number.
+     * 11:15 bits are interpreted as device number
+     * 8:11 bits are interpreted as function number */
+    addr += bus << 16;
+    addr += device << 11;
+    addr += function << 8;
+    addr += offset;
+
+    if (type == NvRmPcieAccessType_Read)
+    {
+        pcie_readmultiple(Data, (NvU8 *)addr, DataLen);
+    } else
+    {
+        NV_ASSERT(type == NvRmPcieAccessType_Write);
+        pcie_writemultiple((NvU8 *)addr, Data, DataLen);
+    }
+    return NvSuccess;
+}
+
+/**
+ * pcie_ReadRPConfig - read a root port's local configuration registers.
+ *
+ * Resolves the per-controller config window inside the mapped PCIE
+ * aperture and copies len bytes starting at offset into data.
+ * Asserts and returns for any controller number other than 0 or 1.
+ *
+ * @param hRm               RM device handle (unused here).
+ * @param data              Destination buffer.
+ * @param len               Number of bytes to read.
+ * @param offset            Byte offset within the RP config space.
+ * @param controller_number Root-port index, 0 or 1.
+ */
+static
+void pcie_ReadRPConfig(NvRmDeviceHandle hRm, NvU8 *data, NvU32 len, NvU32 offset, NvU32 controller_number)
+{
+    // Parameter renamed from the misspelled "controller_numer" to match
+    // this function's forward declaration.
+    NvU8 *addr;
+
+    addr = s_pcieBase;
+    if (controller_number == 0)
+    {
+        addr += NV_PCIE_AXI_RP_T0C0_OFFSET;
+    } else if (controller_number == 1)
+    {
+        addr += NV_PCIE_AXI_RP_T0C1_OFFSET;
+    } else
+    {
+        NV_ASSERT(!"Only 2 controllers in AP20");
+        return;
+    }
+
+    addr += offset;
+
+    pcie_readmultiple(data, addr, len);
+    return;
+}
+
+/**
+ * pcie_WriteRPConfig - write a root port's local configuration registers.
+ *
+ * Resolves the per-controller config window inside the mapped PCIE
+ * aperture and copies len bytes from data starting at offset.
+ * Asserts and returns for any controller number other than 0 or 1.
+ *
+ * @param hRm               RM device handle (unused here).
+ * @param data              Source buffer.
+ * @param len               Number of bytes to write.
+ * @param offset            Byte offset within the RP config space.
+ * @param controller_number Root-port index, 0 or 1.
+ */
+static
+void pcie_WriteRPConfig(NvRmDeviceHandle hRm, NvU8 *data, NvU32 len, NvU32 offset, NvU32 controller_number)
+{
+    // Parameter renamed from the misspelled "controller_numer" to match
+    // this function's forward declaration.
+    NvU8 *addr;
+
+    addr = s_pcieBase;
+    if (controller_number == 0)
+    {
+        addr += NV_PCIE_AXI_RP_T0C0_OFFSET;
+    } else if (controller_number == 1)
+    {
+        addr += NV_PCIE_AXI_RP_T0C1_OFFSET;
+    } else
+    {
+        NV_ASSERT(!"Only 2 controllers in AP20");
+        return;
+    }
+
+    addr += offset;
+
+    pcie_writemultiple(addr, data, len);
+    return;
+}
+
+static
+void pcie_setupAfiAddressTranslations(NvRmDeviceHandle hRm);
+
+/**
+ * pcie_setupAfiAddressTranslations - program the AFI BARs that translate
+ * between the AXI (CPU) address space and the FPCI (PCIE) address space.
+ *
+ * Sets up six downstream AXI->FPCI windows (config, extended config,
+ * downstream IO, prefetchable and non-prefetchable memory, plus one
+ * unused BAR zeroed out), the upstream cacheable system-memory window,
+ * the MSI target window, and enables all 256 MSI vectors.
+ */
+static
+void pcie_setupAfiAddressTranslations(NvRmDeviceHandle hRm)
+{
+    NvU32 fpci_bar;
+    NvU32 size;
+    NvU32 axi_address;
+    NvU32 bar;
+    NvU32 msi_base;
+
+    /* Downstream address translations */
+
+    /* Config Bar */
+    bar = 0;
+    fpci_bar = ((NvU32)0xfdff << 16);
+    size = NVRM_PCIE_CONFIG_SIZE;
+    axi_address = s_pciePhysical + NVRM_PCIE_CONFIG_SIZE;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* Extended config Bar */
+    bar = 1;
+    fpci_bar = ((NvU32)0xfe1 << 20);
+    size = NVRM_PCIE_EXTENDED_CONFIG_SIZE;
+    axi_address = s_pciePhysical + NVRM_PCIE_EXTENDED_CONFIG_OFFSET;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* Downstream IO bar */
+    bar = 2;
+    fpci_bar = ((NvU32)0xfdfc << 16);
+    size = NVRM_PCIE_DOWNSTREAM_IO_SIZE;
+    axi_address = s_pciePhysical + NVRM_PCIE_DOWNSTREAM_IO_OFFSET;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* Pre-fetchable memory BAR */
+    bar = 3;
+    /* Bits 39:12 of 40 bit FPCI address goes to bits 31:4 */
+    fpci_bar = (((FPCI_PREFETCH_MEMORY_OFFSET >> 12) & 0x0FFFFFFF) << 4);
+    fpci_bar |= 0x1;
+    size = NVRM_PCIE_PREFETCH_MEMORY_SIZE;
+    axi_address = s_pciePhysical + NVRM_PCIE_PREFETCH_MEMORY_OFFSET;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* Non pre-fetchable memory BAR */
+    bar = 4;
+    /* Bits 39:12 of 40 bit FPCI address goes to bits 31:4 */
+    fpci_bar = (((FPCI_NON_PREFETCH_MEMORY_OFFSET >> 12) & 0x0FFFFFFF) << 4);
+    fpci_bar |= 0x1;
+    size = NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE;
+    axi_address = s_pciePhysical + NVRM_PCIE_NON_PREFETCH_MEMORY_OFFSET;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* NULL out the remaining BAR as it is not used */
+    fpci_bar = 0;
+    size = 0;
+    axi_address = 0;
+
+    bar = 5;
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_START_0 + bar * 4, axi_address);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_AXI_BAR0_SZ_0 + bar * 4, size >> 12);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_BAR0_0 + bar * 4, fpci_bar);
+
+    /* Upstream address translations. Map the entire system memory as cached */
+    pcie_regw(hRm, PcieRegType_AFI, AFI_CACHE_BAR0_ST_0, FPCI_SYSTEM_MEMORY_OFFSET);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_CACHE_BAR0_SZ_0, FPCI_SYSTEM_MEMORY_SIZE >> 12);
+
+    /* Second cache bar is not used */
+    pcie_regw(hRm, PcieRegType_AFI, AFI_CACHE_BAR1_ST_0, 0);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_CACHE_BAR1_SZ_0, 0);
+
+    /* Map MSI bar.
+     * NOTE(review): AFI_MSI_BAR_SZ_0 is written twice (0, then 0x00010000)
+     * with the BAR start registers cleared in between - presumably a
+     * deliberate disable-reprogram sequence; confirm against the AFI
+     * programming guide. */
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_FPCI_BAR_ST_0, 0);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_BAR_SZ_0, 0);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_AXI_BAR_ST_0, 0);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_BAR_SZ_0, 0x00010000);
+    msi_base = NvRmMemPin(s_pcieMsiMemoryHandle);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_AXI_BAR_ST_0, msi_base);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_FPCI_BAR_ST_0, msi_base);
+
+    //enable all of the MSIs (8 registers x 32 vectors)
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC0_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC1_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC2_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC3_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC4_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC5_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC6_0, 0xffffffff);
+    pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_EN_VEC7_0, 0xffffffff);
+
+    return;
+}
+
+/**
+ * NvRmPrivPciePowerControl - switch the PCIE power rails on or off.
+ *
+ * Looks up the PEX clock peripheral by GUID, then walks its Vdd address
+ * list, setting each rail either to the PMU's advertised request voltage
+ * (enable) or to NVODM_VOLTAGE_OFF (disable), waiting out any settling
+ * time the PMU reports after each change. Silently returns if the GUID
+ * is not found in the ODM peripheral database.
+ *
+ * @param hRm     RM device handle (unused here).
+ * @param bEnable NV_TRUE to power the rails, NV_FALSE to cut them.
+ */
+static void
+NvRmPrivPciePowerControl(NvRmDeviceHandle hRm, NvBool bEnable)
+{
+    NvOdmServicesPmuHandle hPmu;
+    NvU32 SettlingTime;
+    NvOdmServicesPmuVddRailCapabilities RailCaps;
+    NvU32 i;
+    const NvOdmPeripheralConnectivity *pConnectivity = NULL;
+
+    pConnectivity = NvOdmPeripheralGetGuid(NV_VDD_PEX_CLK_ODM_ID);
+    if (pConnectivity == NULL)
+        return;
+
+    hPmu = NvOdmServicesPmuOpen();
+    for (i = 0; i < pConnectivity->NumAddress; i++)
+    {
+        // Only Vdd entries describe power rails; skip everything else
+        if (pConnectivity->AddressList[i].Interface != NvOdmIoModule_Vdd)
+            continue;
+        NvOdmServicesPmuGetCapabilities(hPmu,
+            pConnectivity->AddressList[i].Address, &RailCaps);
+
+        if (bEnable)
+        {
+            NvOdmServicesPmuSetVoltage(hPmu,
+                pConnectivity->AddressList[i].Address,
+                RailCaps.requestMilliVolts, &SettlingTime);
+        } else
+        {
+            NvOdmServicesPmuSetVoltage(hPmu,
+                pConnectivity->AddressList[i].Address,
+                NVODM_VOLTAGE_OFF, &SettlingTime);
+        }
+        // Honor any PMU-reported ramp/settle delay before the next rail
+        if (SettlingTime)
+        {
+            NvOdmOsWaitUS(SettlingTime);
+        }
+    }
+    NvOdmServicesPmuClose(hPmu);
+}
+
+
+/**
+ * CheckPcieRPx - wait for a PCIE root port's link to come up.
+ *
+ * Polls the root port's VEND_XP DL_UP bit and then its link-status
+ * register, each with a 250ms timeout. If either poll times out, the
+ * port is reset via its AFI PEXn_CTRL register, FPCI is re-enabled, and
+ * the whole sequence is retried (up to max_retries attempts total).
+ *
+ * @param hRm   RM device handle.
+ * @param index Root-port index, 0 or 1.
+ *
+ * @return NvSuccess once the link is up, NvError_BadParameter for an
+ *         invalid index, NvError_DeviceNotFound if the link never comes up.
+ */
+static
+NvError CheckPcieRPx(NvRmDeviceHandle hRm, int index)
+{
+    NvError retval=NvError_DeviceNotFound;
+    NvU32 max_timeout=250; //.25 seconds - it's either fast or it doesn't work at all
+    NvU32 max_retries=2;
+    NvU32 retry_count;
+    NvU32 timeout;
+    NvU32 data0, data1;
+
+    //some local constants, depending on which RP this is
+    const NvU32 pcieregtype_cfgx= (index) ? PcieRegType_CFG1 : PcieRegType_CFG0;
+    const NvU32 afi_pexx_ctrl_0 = (index) ? AFI_PEX1_CTRL_0 : AFI_PEX0_CTRL_0;
+
+
+    // Reject anything but root port 0 or 1
+    switch(index) {
+        case 0:
+        case 1:
+            break;
+        default:
+            NV_ASSERT(!"bad index\n");
+            retval=NvError_BadParameter;
+            goto FAIL;
+    }
+
+    for(retry_count=0; retry_count<max_retries; retry_count++) {
+        timeout=max_timeout;
+
+        //if this isn't the first time through, we'll try resetting the port
+        //and hope it wakes up this time.
+        //(NOTE(review): "initialzing" typo below is in the original debug
+        //string and left untouched.)
+        if(retry_count > 0) {
+            NvOsDebugPrintf("initialzing RP%d failed - resetting\n", index);
+            //trigger the reset: drive RST_L to 1, hold 100ms, then back to 0
+            data0 = pcie_regr(hRm, PcieRegType_AFI, afi_pexx_ctrl_0);
+            switch(index) {
+                case 0: data0 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_RST_L, 1, data0); break;
+                case 1: data0 = NV_FLD_SET_DRF_NUM(AFI, PEX1_CTRL, PEX1_RST_L, 1, data0); break;
+                default: NV_ASSERT(!"bad index"); goto FAIL;
+            }
+            pcie_regw(hRm, PcieRegType_AFI, afi_pexx_ctrl_0, data0);
+            NvOsSleepMS(100);
+            switch(index) {
+                case 0: data0 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_RST_L, 0, data0); break;
+                case 1: data0 = NV_FLD_SET_DRF_NUM(AFI, PEX1_CTRL, PEX1_RST_L, 0, data0); break;
+                default: NV_ASSERT(!"bad index"); goto FAIL;
+            }
+            pcie_regw(hRm, PcieRegType_AFI, afi_pexx_ctrl_0, data0);
+
+            //reenable pcie
+            data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_CONFIGURATION_0);
+            data0 = data0 | AFI_CONFIGURATION_0_EN_FPCI_DEFAULT_MASK;
+            pcie_regw(hRm, PcieRegType_AFI, AFI_CONFIGURATION_0, data0);
+        }
+
+        // Poll for the data link layer to report "up", 1ms per iteration
+        data1 = NVPCIE_DRF_NUM(RP, VEND_XP, DL_UP, 1);
+        data0 = pcie_regr(hRm, pcieregtype_cfgx, NV_PROJ__PCIE2_RP_VEND_XP);
+        while (((data0 & data1) != data1) && timeout)
+        {
+            NvOsSleepMS(1);
+            data0 = pcie_regr(hRm, pcieregtype_cfgx, NV_PROJ__PCIE2_RP_VEND_XP);
+            timeout--;
+        }
+
+        if(!timeout) {
+            NvOsDebugPrintf("PCIe RP %d DL timed out after %dms\n", index, max_timeout);
+            continue;
+        }
+
+        NvOsDebugPrintf("PCIe RP %d DL took %d ms\n", index, (max_timeout - timeout));
+
+        // Now poll the link-status field for the expected link-up value
+        timeout=max_timeout;
+        data1 = NVPCIE_DRF_NUM(RP, LINK_CONTROL_STATUS, LINKSTAT, 0x2000);
+        data0 = pcie_regr(hRm, pcieregtype_cfgx, NV_PROJ__PCIE2_RP_LINK_CONTROL_STATUS);
+        while (((data0 & data1) != data1) && timeout)
+        {
+            NvOsSleepMS(1);
+            data0 = pcie_regr(hRm, pcieregtype_cfgx, NV_PROJ__PCIE2_RP_LINK_CONTROL_STATUS);
+            timeout--;
+            NvOsDebugPrintf("Link status not up...retrying..\n");
+        }
+        if(!timeout) {
+            NvOsDebugPrintf("PCIe RP %d Link Control timed out after %dms\n", index, max_timeout);
+            continue;
+        }
+
+        NvOsDebugPrintf("PCIe RP %d Link Control took %dms\n", index, max_timeout-timeout);
+
+        retval=NvSuccess;
+        break; //if we're here, the root port is up
+    }
+
+FAIL:
+    return retval;
+}
+
+
+/* should be called only once and by the RmOpen */
+NvError NvRmPrivPcieOpen(NvRmDeviceHandle hRm)
+{
+ NvU32 data0, data1;
+ NvError err = NvSuccess;
+ ExecPlatform exec;
+ void *pCaps;
+ NvRmModuleCapability ModuleCaps[1];
+ NvOsInterruptHandler hInt = NvRmPrivHandlePcieInterrupt;
+ NvU32 irq;
+ NvU32 i;
+
+ exec = NvRmPrivGetExecPlatform(hRm);
+ if ((exec != ExecPlatform_Soc) && (exec != ExecPlatform_Fpga))
+ {
+ /* PCIE driver is not supported on other platforms */
+ return NvError_ModuleNotPresent;
+ }
+
+ NvRmPrivPciePowerControl(hRm, NV_TRUE);
+ NV_ASSERT_SUCCESS( NvRmSetModuleTristate(hRm,
+ NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0), NV_FALSE));
+
+ ModuleCaps[0].MajorVersion = 1; // AP20
+ ModuleCaps[0].MinorVersion = 0;
+ ModuleCaps[0].EcoLevel = 0;
+ ModuleCaps[0].Capability = NULL;
+ // for now using null caps as this call is made to find the presence of the module.
+
+ err = NvRmModuleGetCapabilities(hRm, NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0),
+ ModuleCaps, 1, (void **)&pCaps);
+ if (err != NvSuccess)
+ return err;
+
+ NvRmModuleGetBaseAddress(hRm, NvRmPrivModuleID_Pcie, &s_pciePhysical, &s_pcieSize);
+ if (s_pciePhysical == 0)
+ return NvError_ModuleNotPresent;
+
+ s_pcieMsiVectorCount=0;
+
+ for(i=0;i<MAX_MSI_HANDLERS;i++) {
+ MSIHandlers[i].sem=NULL;
+ }
+
+ /* Only map the 3 sub-apertures instead of the entire 1GB aperture. */
+ s_pcieSize = NVRM_PCIE_EXTENDED_CONFIG_SIZE + NVRM_PCIE_CONFIG_SIZE + NVRM_PCIE_REGISTER_APERTURE_SIZE;
+
+ err = NvRmPhysicalMemMap(s_pciePhysical,
+ s_pcieSize,
+ NVOS_MEM_READ_WRITE,
+ NvOsMemAttribute_Uncached,
+ (void** )&s_pcieBase);
+ if (err != NvSuccess)
+ {
+ goto fail;
+ }
+
+ /* Start PCIE refclock (enable PLLE) */
+ if (exec == ExecPlatform_Soc)
+ {
+ if (NvRmPowerRegister(hRm, 0, &s_PowerClientId) != NvSuccess)
+ goto fail;
+ if (NvRmPowerModuleClockControl(hRm, NvRmPrivModuleID_Pcie,
+ s_PowerClientId, NV_TRUE) != NvSuccess)
+ {
+ goto fail;
+ }
+ }
+
+ /* Pulse the reset to AFI and PCIe and keep the PCIXCLK in reset untill the
+ * AFI and PCIE are configured */
+ //NvRmModuleReset(hRm, NvRmPrivModuleID_Afi);
+ //NvRmModuleReset(hRm, NvRmPrivModuleID_Pcie);
+ //NvRmPrivModuleReset(hRm, NvRmPrivModuleID_PcieXclk, NV_TRUE);
+ NvRmModuleResetWithHold(hRm, NvRmPrivModuleID_Afi, NV_FALSE);
+ NvRmModuleResetWithHold(hRm, NvRmPrivModuleID_Pcie, NV_FALSE);
+ NvRmModuleResetWithHold(hRm, NvRmPrivModuleID_PcieXclk, NV_TRUE);
+
+
+ /* Enable slot clock and pulse external reset signal */
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_PEX0_CTRL_0);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_REFCLK_EN, 1, data0);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX0_CTRL_0, data0);
+
+ data1 = pcie_regr(hRm, PcieRegType_AFI, AFI_PEX1_CTRL_0);
+ data1 = NV_FLD_SET_DRF_NUM(AFI, PEX1_CTRL, PEX1_REFCLK_EN, 1, data1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX1_CTRL_0, data1);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_RST_L, 0, data0);
+ data1 = NV_FLD_SET_DRF_NUM(AFI, PEX1_CTRL, PEX1_RST_L, 0, data1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX0_CTRL_0, data0);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX1_CTRL_0, data1);
+ /* FIXME it seems that the PCIe devices need this much delay! */
+ NvOsSleepMS(100);
+
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_PEX0_CTRL_0);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_RST_L, 1, data0);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX0_CTRL_0, data0);
+#ifndef SINGLE_PORT
+ data1 = pcie_regr(hRm, PcieRegType_AFI, AFI_PEX1_CTRL_0);
+ data1 = NV_FLD_SET_DRF_NUM(AFI, PEX0_CTRL, PEX0_RST_L, 1, data1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PEX1_CTRL_0, data1);
+#endif
+
+ // Verify the controller DEVIDs
+ data0 = pcie_regr(hRm, PcieRegType_CFG0, NV_PROJ__PCIE2_RP_DEV_ID);
+ if ((NVPCIE_DRF_VAL(RP, DEV_ID, VENDOR_ID, data0)) != NV_PROJ__PCIE2_RP_DEV_ID_VENDOR_ID_NVIDIA)
+ {
+ NV_ASSERT(!"Something broken cannot read PCIE root port 0 DevID register, check the clcoks to the module");
+ }
+#ifndef SINGLE_PORT
+
+ data0 = pcie_regr(hRm, PcieRegType_CFG1, NV_PROJ__PCIE2_RP_DEV_ID);
+ if ((NVPCIE_DRF_VAL(RP, DEV_ID, VENDOR_ID, data0)) != NV_PROJ__PCIE2_RP_DEV_ID_VENDOR_ID_NVIDIA)
+ {
+ NV_ASSERT(!"Something broken cannot read PCIE root port 1 DevID register, check the clcoks to the module");
+ }
+#endif
+
+ /* Enable dual controller and both ports*/
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_PCIE_CONFIG_0);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PCIE_CONFIG, PCIEC0_DISABLE_DEVICE, 0, data0);
+#ifndef SINGLE_PORT
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PCIE_CONFIG, SM2TMS0_XBAR_CONFIG, 1, data0);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, PCIE_CONFIG, PCIEC1_DISABLE_DEVICE, 0, data0);
+#endif
+ pcie_regw(hRm, PcieRegType_AFI, AFI_PCIE_CONFIG_0, data0);
+
+ /* This is for FPGA only, as it has seperate PHY */
+ if (exec == ExecPlatform_Fpga)
+ {
+ //FIXME - should be different for single port
+ pcie_regw(hRm, PcieRegType_AFI, AFI_WR_SCRATCH_0, 0x2020);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_WR_SCRATCH_0, 0x0);
+ NvOsSleepMS(100);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_WR_SCRATCH_0, 0x2020);
+ // CHECK PE0 PRESENT. Bit 3 will set to 1 when the the cable is present.
+ do
+ {
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_RD_SCRATCH_0);
+ } while ((data0 & 0x8) != 0x8);
+ } else
+ {
+ /* Initialze AP20 internal PHY */
+ //ENABLE up to 16 PCIE lanes
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_SEL_1, 0x0);
+
+ //override IDDQ to 1 on all 4 lanes
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, CTL_1, IDDQ_1L, 1, data0);
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1, data0);
+
+ //set up PHY PLL inputs select PLLE output as refclock
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, PLL_CTL1, PLL_REFCLK_SEL,
+ NV_PROJ__PCIE2_PADS_PLL_CTL1_PLL_REFCLK_SEL_INTERNAL_CML, data0);
+
+ //set TX ref sel to div10 (not div5)
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, PLL_CTL1, PLL_TXCLKREF_SEL,
+ NV_PROJ__PCIE2_PADS_PLL_CTL1_PLL_TXCLKREF_SEL_DIV10, data0);
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1, data0);
+
+ //take PLL out of reset
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, PLL_CTL1, PLL_RST_B4SM,
+ NV_PROJ__PCIE2_PADS_PLL_CTL1_PLL_RST_B4SM_DEASSERT, data0);
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1, data0);
+
+ //set the clock voltage to MCP default
+ //this register isn't in documentation!
+ //but it works and is necessary
+ data0=0xFA5CFA5C;
+ pcie_regw(hRm,PcieRegType_PADS, 0xc8, data0);
+
+ //check PLL is locked
+ data1 = 0;
+ data1 = NVPCIE_FLD_SET_DRF_NUM(PADS, PLL_CTL1, PLL_LOCKDET,
+ NV_PROJ__PCIE2_PADS_PLL_CTL1_PLL_LOCKDET_LOCKED, data1);
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1);
+ while ((data0 & data1) != data1)
+ {
+ //wait for PLL to lock
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_PLL_CTL1);
+ }
+
+ //turn off IDDQ override
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, CTL_1, IDDQ_1L, 0, data0);
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1, data0);
+
+ //ENABLE TX/RX data
+ data0 = pcie_regr(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, CTL_1, TX_DATA_EN_1L,
+ NV_PROJ__PCIE2_PADS_CTL_1_TX_DATA_EN_1L_ENABLE, data0);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(PADS, CTL_1, RX_DATA_EN_1L,
+ NV_PROJ__PCIE2_PADS_CTL_1_RX_DATA_EN_1L_ENABLE, data0);
+ pcie_regw(hRm, PcieRegType_PADS, NV_PROJ__PCIE2_PADS_CTL_1, data0);
+ }
+
+ irq = NvRmGetIrqForLogicalInterrupt(hRm, NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0), 0);
+ err = NvRmInterruptRegister( hRm, 1, &irq, &hInt, hRm,
+ &s_pcieInterruptHandle, NV_TRUE );
+ if (err != NvSuccess)
+ {
+ goto fail;
+ }
+
+ irq = NvRmGetIrqForLogicalInterrupt(hRm, NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0), 1);
+ hInt= NvRmPrivHandlePcieMSI;
+
+ err = NvRmMemHandleCreate( hRm, &s_pcieMsiMemoryHandle, MAX_MSI_HANDLERS * sizeof(NvU32));
+ if( err != NvSuccess )
+ {
+ goto fail;
+ }
+ err = NvRmMemAlloc(s_pcieMsiMemoryHandle, NULL, 0, MSI_MEMORY_ALIGNMENT,
+ NvOsMemAttribute_Uncached);
+ if( err != NvSuccess )
+ {
+ goto fail;
+ }
+
+ err = NvRmInterruptRegister( hRm, 1, &irq, &hInt, hRm,
+ &s_pcieMSIHandle, NV_TRUE );
+ if (err != NvSuccess)
+ {
+ goto fail;
+ }
+
+ /* setup the AFI address translations */
+ pcie_setupAfiAddressTranslations(hRm);
+
+ /* Take the PCIe interface module out of reset to start the PCIe training
+ * sequence */
+ NvRmModuleResetWithHold(hRm, NvRmPrivModuleID_PcieXclk, NV_FALSE);
+
+ /* Enable PCIE */
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_CONFIGURATION_0);
+ data0 = data0 | AFI_CONFIGURATION_0_EN_FPCI_DEFAULT_MASK;
+ pcie_regw(hRm, PcieRegType_AFI, AFI_CONFIGURATION_0, data0);
+
+ /* Assume no device */
+ s_PcieRootPort0Present = NV_FALSE;
+ s_PcieRootPort1Present = NV_FALSE;
+ s_PcieRootPortPresent = NV_FALSE;
+
+ if(CheckPcieRPx(hRm, 0)==NvSuccess)
+ s_PcieRootPort0Present=NV_TRUE;
+#ifndef SINGLE_PORT
+ if(CheckPcieRPx(hRm, 1)==NvSuccess)
+ s_PcieRootPort1Present=NV_TRUE;
+#endif
+
+ if(s_PcieRootPort0Present || s_PcieRootPort1Present)
+ s_PcieRootPortPresent=NV_TRUE;
+ else {
+ NvOsDebugPrintf("PCIe link failure on both root ports!\n");
+ err = NvError_DeviceNotFound;
+ goto fail;
+ }
+
+#ifdef SINGLE_PORT
+ data0=pcie_regr(hRm, PcieRegType_CFG0, NV_PROJ__PCIE2_RP_MISC0);
+ data0=NVPCIE_FLD_SET_DRF_NUM(RP, MISC0, ENABLE_CLUMPING, 1, data0);
+ pcie_regw(hRm, PcieRegType_CFG0, NV_PROJ__PCIE2_RP_MISC0, data0);
+
+#else
+ //if both ports are up, set up SLI mode too
+ //if(s_PcieRootPort0Present && s_PcieRootPort1Present) {
+ if(0){
+ //RP0
+ data0 = pcie_regr(hRm, PcieRegType_CFG0, NV_PROJ__PCIE2_RP_MISC0);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(RP, MISC0, NATIVE_P2P_ENABLE, 1, data0);
+ pcie_regw(hRm, PcieRegType_CFG0, NV_PROJ__PCIE2_RP_MISC0, data0);
+ //RP1
+ data0 = pcie_regr(hRm, PcieRegType_CFG1, NV_PROJ__PCIE2_RP_MISC0);
+ data0 = NVPCIE_FLD_SET_DRF_NUM(RP, MISC0, NATIVE_P2P_ENABLE, 1, data0);
+ pcie_regw(hRm, PcieRegType_CFG1, NV_PROJ__PCIE2_RP_MISC0, data0);
+
+ data0 = pcie_regr(hRm, PcieRegType_AFI, AFI_FUSE_0);
+ data0 = NV_FLD_SET_DRF_NUM(AFI, FUSE, FUSE_PCIE_SLI_DIS, 0, data0);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_FUSE_0, data0);
+ }
+#endif
+ /* Enable PCIe interrupts */
+ data0 = 0;
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_INI_SLVERR, 1);
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_INI_DECERR, 1);
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_TGT_SLVERR, 1);
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_TGT_DECERR, 1);
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_TGT_WRERR, 1);
+ data0 |= NV_DRF_NUM(AFI, AFI_INTR_ENABLE, EN_DFPCI_DECERR, 1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_AFI_INTR_ENABLE_0, data0);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_SM_INTR_ENABLE_0, 0xffffffff);
+ data0 = 0;
+ data0 |= NV_DRF_NUM(AFI, INTR_MASK, INT_MASK, 1);
+ data0 |= NV_DRF_NUM(AFI, INTR_MASK, MSI_MASK, 1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_INTR_MASK_0, data0);
+
+
+
+ /* set the PCI to generate secure upstream transactions */
+ data0 = 0;
+ /* Bit 0 = I/D access bit
+ * Bit 1 = NS bit
+ * Bit 2 = Normal/Privileged bit
+ * */
+ data0 |= NV_DRF_NUM(AFI, APROT_OVERRIDE, APROT_OVERRIDE_VAL, 0x0);
+ data0 |= NV_DRF_NUM(AFI, APROT_OVERRIDE, APROT_OVERRIDE_EN, 0x1);
+ //pcie_regw(hRm, PcieRegType_AFI, AFI_APROT_OVERRIDE_0, data0);
+
+ pcie_businit(hRm);
+ NvOsDebugPrintf("PCIe bus: Found %d devices/bridges on the pci bus\n", s_NumPciDevices);
+ if (NV_DEBUG)
+ {
+ pcie_buswalk(hRm, s_rp0);
+ }
+
+ /* Disable masksing off the aborts as the enumeration is done. From now on,
+ * any illegal access will cause data abort.
+ *
+ * Illegal access can be invalid bus segment, invalid pcie aperture
+ */
+ data0 = 0;
+ data0 |= NV_DRF_NUM(AFI, FPCI_ERROR_MASKS, MASK_FPCI_TARGET_ABORT, 1);
+ data0 |= NV_DRF_NUM(AFI, FPCI_ERROR_MASKS, MASK_FPCI_DATA_ERROR, 1);
+ data0 |= NV_DRF_NUM(AFI, FPCI_ERROR_MASKS, MASK_FPCI_MASTER_ABORT, 1);
+ pcie_regw(hRm, PcieRegType_AFI, AFI_FPCI_ERROR_MASKS_0, data0);
+
+ return NvSuccess;
+
+fail:
+ if (s_pcieInterruptHandle != NULL)
+ {
+ NvRmInterruptUnregister(hRm, s_pcieInterruptHandle);
+ }
+ NvRmPhysicalMemUnmap(s_pcieBase, s_pcieSize);
+ if (s_pcieMsiMemoryHandle)
+ {
+ NvRmMemUnpin(s_pcieMsiMemoryHandle);
+ NvRmMemHandleFree(s_pcieMsiMemoryHandle);
+ s_pcieMsiMemoryHandle = NULL;
+ }
+ NvRmPrivPciePowerControl(hRm, NV_FALSE);
+ NV_ASSERT_SUCCESS( NvRmSetModuleTristate(hRm,
+ NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0), NV_TRUE));
+ return err;
+}
+
/*
 * Tear down everything NvRmPrivPcieOpen set up: interrupt handler, module
 * clock and power client, register mapping, MSI memory, PMU rails, and
 * finally tristate the PCIe pads again.
 */
void NvRmPrivPcieClose(NvRmDeviceHandle hDeviceHandle)
{
    if (s_pcieInterruptHandle != NULL)
    {
        NvRmInterruptUnregister(hDeviceHandle, s_pcieInterruptHandle);
    }
    /* NOTE(review): s_PowerClientId is only registered for ExecPlatform_Soc
     * in NvRmPrivPcieOpen; confirm these calls are benign on FPGA. */
    NvRmPowerModuleClockControl(hDeviceHandle, NvRmPrivModuleID_Pcie,
        s_PowerClientId, NV_FALSE);
    NvRmPowerUnRegister(hDeviceHandle, s_PowerClientId);
    NvRmPhysicalMemUnmap(s_pcieBase, s_pcieSize);
    /* Reset the cached aperture description so a later Open starts clean. */
    s_pcieSize = 0;
    s_pcieBase = 0;
    s_pciePhysical = 0;
    if (s_pcieMsiMemoryHandle)
    {
        NvRmMemUnpin(s_pcieMsiMemoryHandle);
        NvRmMemHandleFree(s_pcieMsiMemoryHandle);
        s_pcieMsiMemoryHandle = NULL;
    }
    NvRmPrivPciePowerControl(hDeviceHandle, NV_FALSE);
    NV_ASSERT_SUCCESS( NvRmSetModuleTristate(hDeviceHandle,
        NVRM_MODULE_ID(NvRmPrivModuleID_Pcie, 0), NV_TRUE));
    return;
}
+
+
+static
+NvRmPciDevice *pcie_allocDevice(NvRmDeviceHandle rm)
+{
+ static NvU32 index = 0;
+ NvRmPciDevice *dev;
+
+ if (index == 0)
+ {
+ NvOsMemset(pciDevices, 0, sizeof(pciDevices));
+ }
+
+ dev = &pciDevices[index];
+ index++;
+ s_NumPciDevices = index;
+ return dev;
+}
+
+
+static NvRmPciDevice *pcie_RecursiveSearch(NvRmPciDevice *device, NvU32 function_device_bus);
+
+static NvRmPciDevice *
+pcie_RecursiveSearch(NvRmPciDevice *root, NvU32 function_device_bus)
+{
+ NvRmPciDevice *device = 0;
+
+ if (root)
+ {
+ if (root->bus == function_device_bus)
+ {
+ return root;
+ }
+ device = pcie_RecursiveSearch(root->next, function_device_bus);
+ if (device)
+ return device;
+
+ return pcie_RecursiveSearch(root->child, function_device_bus);
+ }
+ return device;
+}
+
+
/*
 * Look up a device by its packed function/device/bus value, starting from
 * root port 0's tree (s_rp0).  Returns NULL if not found.
 */
static NvRmPciDevice *
pcie_GetDevice(NvU32 function_device_bus)
{
    return pcie_RecursiveSearch(s_rp0, function_device_bus);
}
+
/*
 * Size and program the six 32-bit BARs of 'dev', carving addresses out of
 * the I/O, non-prefetchable and prefetchable windows the parent handed
 * down (dev->io_base / mem_base / prefetch_base), then enable I/O, memory
 * and bus mastering in the device's command register.  The high-water
 * marks are stored back into dev->*_limit for the caller's accounting.
 */
static void
pcie_ConfigureDeviceBAR(NvRmDeviceHandle rm, NvRmPciDevice *dev)
{
    NvU32 size;
    NvU8 flags;
    NvRmPciResource r;
    NvU32 bar_index;
    NvU32 addr;
    NvU32 control;

    /* Running allocation cursors for the three address spaces. */
    NvU32 io_base = dev->io_base;
    NvU32 mem_base = dev->mem_base;
    NvU32 prefetch_base = dev->prefetch_base;

    for (bar_index = 0x0; bar_index < 6; bar_index ++)
    {
        NvBool valid = NV_FALSE;
        /* Standard BAR sizing probe: write all-ones, read back the mask. */
        size = 0xFFFFFFFF;
        (void)NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
            bar_index * 4+ 0x10, (NvU8 *)&size, 4);
        (void)NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Read,
            bar_index * 4 + 0x10, (NvU8 *)&size, 4);

        if (size == 0xffffffff) continue;
        if (size == 0) continue; //some devices are broken
        /* Low 4 bits of the readback encode the BAR type. */
        flags = (size & 0x000f);

        /* Size align the addr and write that BAR offset */
        if (flags & 0x1)
        {
            /* I/O BAR */
            size &= ~0xF; // Ignore the last 4 bits
            size |= 0xffff0000; //some devices hardwire the high bits of IO bars to 0
            size = ~size; // Do the 1's complement
            size += 1; // Add 1 to get the final size of the BAR.

            r = NvRmPciResource_Io;
            //IO spaces are at most 256 bytes, and sometimes
            // they hardwire the upper 16 bits in the bar to 0
            addr = io_base;
            addr += (size-1);       /* align up to the BAR's size */
            addr &= ~(size-1);
            if((addr + size)> NVRM_PCIE_DOWNSTREAM_IO_SIZE)
            {
                valid = NV_FALSE ;
                NvOsDebugPrintf("Warning: could not allocate I/O mem for device %02x bar %d\n", dev->bus, bar_index);
            }
            else
            {
                io_base = addr + size;
                valid = NV_TRUE;
            }

            /* FIXME assert if there is more space needed */

        } else
        {
            /* Memory BAR */
            size &= ~0xF; // Ignore the last 4 bits
            size = ~size; // Do the 1's complement
            size += 1; // Add 1 to get the final size of the BAR.

            if (flags & 0x08)
            {
                /* Prefetchable memory BAR */
                r = NvRmPciResource_PrefetchMemory;
                addr = prefetch_base;
                addr += (size-1);
                addr &= ~(size-1);

                // make sure we have memory in the prefetchable memory space available for this
                // we hit this case with 2xGT218 (they need 256MB for bar 1, and another ~32MB for bar 3)
                if((addr + size) <= (dev->prefetch_max)) {
                    valid = NV_TRUE;
                    prefetch_base = addr + size;
                } else {
                    NvOsDebugPrintf("Warning: could not allocate prefetchable memory for device %02x bar %d\n", dev->bus, bar_index);
                    valid = NV_FALSE;
                }

            } else
            {
                /* Non-prefetchable memory BAR */
                r = NvRmPciResource_NonPrefetchMemory;
                addr = mem_base;
                addr += (size-1);
                addr &= ~(size-1);

                if((addr+size) <= (dev->mem_max)) {
                    valid = NV_TRUE;
                    mem_base = addr + size;
                } else {
                    NvOsDebugPrintf("Warning: could not allocate non-prefetchable memory for device %02x bar %d\n", dev->bus, bar_index);
                    valid = NV_FALSE;
                }
            }
        }

        /* Only program the BAR when an address was actually allocated. */
        if (valid==NV_TRUE)
        {
            NvRmReadWriteConfigSpace(rm, dev->bus,
                NvRmPcieAccessType_Write, bar_index * 4 + 0x10,
                (NvU8 *)&addr, 4);
        }

        /* NOTE(review): base/size/type are recorded even when allocation
         * failed (valid == NV_FALSE) — confirm downstream users check the
         * debug output rather than these fields. */
        dev->bar_base[bar_index] = addr;
        dev->bar_size[bar_index] = size;
        dev->bar_type[bar_index] = r;

        /* handle 64 bit addresses differently */
        if ((flags == 0x0c) || (flags==0x04))
        {
            //we're just locating it in 32bit space, so the 64 bit extension should be 0
            NvU32 upper_addr_bits = 0;

            //I hope the last bar doesn't claim to be 64 bit!!!
            bar_index++;
            NV_ASSERT(bar_index < 6);

            (void)NvRmReadWriteConfigSpace(rm, dev->bus,
                NvRmPcieAccessType_Write, bar_index * 4 + 0x10, (NvU8 *)&upper_addr_bits, 4);
        }
    }

    /* Report how much of each window this device consumed. */
    dev->io_limit = io_base;
    dev->mem_limit = mem_base;
    dev->prefetch_limit = prefetch_base;

    /* Update the control register to enable memory/io/bus-mastering */
    NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Read,
        NV_PROJ__PCIE2_RP_DEV_CTRL, (NvU8 *)&control, 2);
    control = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, IO_SPACE, ENABLED, control);
    control = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, MEMORY_SPACE, ENABLED, control);
    control = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, BUS_MASTER, ENABLED, control);
    control = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, INTR_DISABLE, NO, control);
    control = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, SERR, ENABLED, control);
    NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
        NV_PROJ__PCIE2_RP_DEV_CTRL, (NvU8 *)&control, 2);
}
+
+static NvU8
+pcie_MsiGetOffset(NvRmDeviceHandle rm, NvRmPciDevice *device);
+
+//0 if it didn't find it. (offset can't be 0, that's the PCIE devid)
+static NvU8
+pcie_MsiGetOffset(NvRmDeviceHandle rm, NvRmPciDevice *device)
+{
+ NvU8 offset=0;
+ NvU8 src;
+ NvError e;
+
+ NV_CHECK_ERROR_CLEANUP(NvRmReadWriteConfigSpace(rm,
+ device->bus, NvRmPcieAccessType_Read,
+ NV_PROJ__PCIE2_RP_CAP_PTR, (NvU8 *)&src, 1));
+
+ offset=src;
+ offset&=0xfc;
+
+ while (offset)
+ {
+ NvU8 id;
+ NvU8 next_offset;
+ NvU16 configarea;
+
+ e = NvRmReadWriteConfigSpace(rm, device->bus,
+ NvRmPcieAccessType_Read,
+ offset, (NvU8*)&configarea, 2);
+ if (e!=NvSuccess)
+ {
+ offset=0;
+ goto fail;
+ }
+
+ id=(configarea & 0x00ff);
+ //high byte is the address of the next cap
+ next_offset=((configarea & 0xff00) >> 8);
+ next_offset &= 0xfc; //mask off the bottom two bits again
+
+ if (id==NV_PROJ__PCIE2_RP_MSI_CTRL_CAP_ID_MSI)
+ break;
+ offset=next_offset;
+ }
+
+fail:
+ return offset;
+}
+
+static NvBool pcie_Is64BitMsi(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset);
+static NvBool
+pcie_Is64BitMsi(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset)
+{
+ NvU16 message_control;
+
+ if (offset)
+ {
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Read,
+ offset+2, (NvU8 *)&message_control, 2);
+ return (message_control & 0x0080) ? NV_TRUE : NV_FALSE;
+ }
+ return NV_FALSE;
+}
+
+
+static void pcie_SetMsiMultipleMessageEnable(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU8 count);
+
+static void
+pcie_SetMsiMultipleMessageEnable(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU8 count)
+{
+ NvU16 message_control=0;
+
+ if(offset)
+ {
+ NvU8 val;
+
+ switch (count) {
+ case 1: val=0x00; break;
+ case 2: val=0x01; break;
+ case 4: val=0x02; break;
+ case 8: val=0x03; break;
+ case 16: val=0x04; break;
+ case 32: val=0x05; break;
+ default:
+ NV_ASSERT(!"bad count");
+ return;
+ }
+
+
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Read,
+ offset+2, (NvU8 *)&message_control, 2);
+
+ message_control &= 0x008f;
+ message_control |= (val<<4);
+
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write,
+ offset+2, (NvU8 *)&message_control, 2);
+
+ }
+}
+
+static void pcie_MsiSetAddr(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU64 addr);
+
+static void
+pcie_MsiSetAddr(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU64 addr)
+{
+ if(offset)
+ {
+ NvU32 low32;
+ NvU32 high32;
+
+ low32 = (NvU32)(addr);
+ high32 = (NvU32)(addr>>32);
+
+ if(!pcie_Is64BitMsi(rm, device, offset))
+ {
+ if(addr > 0x0000000100000000ull)
+ {
+ NvOsDebugPrintf("64 bit address given, but only 32 supported for MSI");
+ return;
+ }
+ }
+ else
+ {
+ //write the high bits of the 64 bit register
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write, offset+8, (NvU8 *)&high32, 4);
+ }
+
+ //the low 32 bits of 32 and 64 bit MSI are at the same spot
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write, offset+4, (NvU8 *)&low32, 4);
+ }
+}
+
+static void pcie_MsiSetData(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU16 data);
+
+static void
+pcie_MsiSetData(NvRmDeviceHandle rm, NvRmPciDevice *device, NvU8 offset, NvU16 data)
+{
+ if (offset)
+ {
+ //data is in a different spot based on the size of the MSI
+ if (pcie_Is64BitMsi(rm, device, offset))
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write, offset+12, (NvU8 *)&data, 2);
+ else
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write, offset+8, (NvU8 *)&data, 2);
+ }
+}
+
+static void
+pcie_ConfigureMSI(NvRmDeviceHandle rm, NvRmPciDevice *device)
+{
+ //iterate through the extended config looking for the MSI record
+ NvU8 offset;
+
+ //search for the MSI pointer
+ offset = pcie_MsiGetOffset(rm, device);
+ if (offset)
+ {
+ NvU64 addr;
+ NvU16 new_vector_count;
+ NvU8 num_vectors;
+
+ if (!pcie_Is64BitMsi(rm, device, offset))
+ {
+ //the address we use is a 64 bit one - would need to change all this
+ NV_ASSERT(!"32 bit MSI device unsupported\n");
+ return;
+ }
+
+ addr = pcie_regr(rm, PcieRegType_AFI, AFI_MSI_FPCI_BAR_ST_0);
+ //FIXME - what is this supposed to be - need to test if this needs to be shifted
+ //addr=addr
+
+ NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Read,
+ offset+2, (NvU8 *)&num_vectors, 2);
+ num_vectors = (num_vectors & 0x000e) >> 1;
+ num_vectors = 1 << num_vectors;
+ NV_ASSERT(num_vectors <= 32);
+
+ //new vector base has to start at a multiple of num_vectors.
+ //i.e. if num_vectors is 8 then start has to be xxxxx000
+ // if num_vectors is 2 then start has to be xxxxxxx0
+ new_vector_count=((s_pcieMsiVectorCount + (num_vectors - 1)) & ~(num_vectors-1));
+ NV_ASSERT(new_vector_count < 256);
+
+ pcie_MsiSetAddr(rm, device, offset, addr + new_vector_count);
+ pcie_MsiSetData(rm, device, offset, new_vector_count);
+
+ pcie_SetMsiMultipleMessageEnable(rm, device, offset, num_vectors);
+ s_pcieMsiVectorCount=new_vector_count;
+ }
+}
+
+
+
+
/*
 * Recursively enumerate the bus behind 'dev_parent': probe device slots
 * 0..0x1f on its secondary bus, allocate an NvRmPciDevice for each one
 * that answers, assign bus numbers to bridges, and recurse into them.
 * On return dev_parent->sub_bus holds the highest bus number used in the
 * subtree, and (except for root ports) the bridge's subordinate-bus
 * register has been trimmed down to that value.
 */
static void
pcie_scanbus(NvRmDeviceHandle rm, NvRmPciDevice *dev_parent)
{
    NvU32 subordinate_bus;
    NvU32 next_bus_number;
    NvU32 device = 0;
    NvU32 data;
    NvU32 id;
    NvError err;
    NvRmPciDevice *dev;

    next_bus_number = dev_parent->sec_bus;
    while (1)
    {
        NvU32 retry_count = 6;
        /* Past the last device slot on this bus: finalize and return. */
        if (device == 0x20)
        {
            dev_parent->sub_bus = next_bus_number;
            if (!dev_parent->isRootPort)
            {
                /* Change the subordinate bus-number to the actual value of all
                 * buses on the hierarchy.
                 *
                 * Do this except for the root port.
                 */
                data = next_bus_number;
                NvRmReadWriteConfigSpace(rm, (dev_parent->bus),
                    NvRmPcieAccessType_Write, NV_PROJ__PCIE2_RP_BN_LT + 0x2,
                    (NvU8 *)&data, 1);
            }
            return;
        }

        if (dev_parent->isRootPort && device != 0)
        {
            /* Special exit condition for root ports, as the AP20 root port
             * seems to connect to only one device */
            return;
        }

        /* Probe the vendor/device ID dword, retrying a few times. */
        while (--retry_count)
        {
            err = NvRmReadWriteConfigSpace(rm, (dev_parent->sec_bus | device << 8),
                NvRmPcieAccessType_Read, 0x0, (NvU8 *)&id, 4);
            if (err != NvSuccess)
            {
                // NvOsDebugPrintf("PCIe link is not up\n");
                return;
            }
            if (id != 0xFFFFFFFF)
            {
                /* Found a valid device, break. Otherwise, retry a couple of
                 * times. It is possible that the bridges can take some time
                 * to settle and it will take a couple of transactions to
                 * find the devices behind the bridge.
                 */
                //this is still necessary, to detect the g98 behind the br04
                NvOsSleepMS(100); /* FIXME it seems that the PCIe devices need this much delay! */
                break;
            }
        }
        if (id == 0xFFFFFFFF)
        {
            /* Invalid device. Skip that one and look for next device */
            device++;
            continue;
        }

        dev = pcie_allocDevice(rm);
        /* Fill the device information and link it under its parent. */
        dev->parent = dev_parent;
        dev->id = id;
        dev->bus = dev_parent->sec_bus | device << 8;
        if (dev_parent->child == NULL)
        {
            dev_parent->child = dev;
            dev->prev = NULL;
        } else
        {
            /* Add dev to the list of devices on the same bus */
            NvRmPciDevice *temp;

            temp = dev_parent->child;
            NV_ASSERT(temp != NULL);
            while (temp->next != NULL)
                temp = temp->next;
            temp->next = dev;
            dev->prev = temp;
        }

        /* Header-type / class info: bits 22:16 discriminate endpoint (0),
         * bridge (1) and PC-card (2). */
        (void)NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Read,
            NV_PROJ__PCIE2_RP_MISC_1, (NvU8 *)&data, 4);
        data = (data >> 16);
        if ((data & 0x7f) == 0x1)
        {
            /* Bridge device */

            /* Temporarily assign 0xff for the subordinate bus-number, as we don't
             * know how many devices are present in the system */
            subordinate_bus = 0xff;
            dev->sec_bus = next_bus_number + 1;
            data = (subordinate_bus << 16) | (dev->sec_bus << 8) | (dev_parent->sec_bus);
            NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
                NV_PROJ__PCIE2_RP_BN_LT, (NvU8 *)&data, 3);

            /* Scan all the buses behind this bridge */
            pcie_scanbus(rm, dev);

            next_bus_number = dev->sub_bus;
        } else if ((data & 0x7f) == 0x0)
        {
            //NvOsDebugPrintf("PCI endpoint (0x%x) is on bus = %d, device = %d\n",
            //    id, dev_parent->sec_bus, device);
            /* PCI endpoint - Can be single function or multi function */

        } else if ((data & 0x7f) == 0x2)
        {
            /* PC card device */
        } else
        {
            NV_ASSERT(!"invalid or malfunctional PCIe device \n");
        }
        device ++;
    }
}
+
/*
 * Depth-first (children, then siblings, then self) resource allocation
 * over the enumerated device tree.  Leaves (sub_bus == 0) get their BARs
 * and MSI programmed; bridges inherit the limits of their first enabled
 * child and have their memory/prefetch base-limit filtering registers
 * programmed.  A bridge with no enabled children is marked disabled.
 * (Name keeps the historical "Resoruces" spelling; it is referenced
 * elsewhere in this file.)
 */
static
void pcie_allocateResoruces(NvRmDeviceHandle rm, NvRmPciDevice *dev)
{
    NvU32 data;

    if (dev == NULL)
        return;

    /* Non-root nodes start from the windows the parent was given. */
    if (!dev->isRootPort)
    {
        dev->mem_base = dev->parent->mem_base;
        dev->io_base = dev->parent->io_base;
        dev->prefetch_base = dev->parent->prefetch_base;

        dev->mem_limit = dev->parent->mem_limit;
        dev->io_limit = dev->parent->io_limit;
        dev->prefetch_limit = dev->parent->prefetch_limit;

        dev->mem_max = dev->parent->mem_max;
        dev->prefetch_max = dev->parent->prefetch_max;
    }

    /* Employing a depth first search algorithm for resource allocation. */
    if (dev->child != NULL)
    {
        pcie_allocateResoruces(rm, dev->child);
    }

    if (dev->next != NULL)
    {
        pcie_allocateResoruces(rm, dev->next);
    }

    /* A PCI device */
    if (dev->sub_bus == 0)
    {
        NvRmPciDevice *nextDev;

        /* If this is first device on the bus, get the "base" from the parent,
         * if not get the base from the next node on the chain which is not
         * disabled. Disabled node is node that is either malfunctioning or a
         * bridge with no devices beneath that device. */
        nextDev = dev->next;
        while (nextDev != NULL)
        {
            if (!nextDev->IsDisabled)
                break;
            nextDev = nextDev->next;
        }
        if (nextDev)
        {
            dev->mem_base = nextDev->mem_limit;
            dev->io_base = nextDev->io_limit;
            dev->prefetch_base = nextDev->prefetch_limit;
        }
        pcie_ConfigureDeviceBAR(rm, dev);

        pcie_ConfigureMSI(rm, dev);
    } else
    {
        NvRmPciDevice *child_dev;
        NvBool is64bit = NV_FALSE;

        /* First enabled child will have the max values of the address
         * regions for all the devices on that bus */
        child_dev = dev->child;
        while (child_dev != NULL)
        {
            if (!child_dev->IsDisabled)
                break;
            child_dev = child_dev->next;
        }
        if (child_dev == NULL)
        {
            /* Bridge with nothing usable behind it: zero it out and mark
             * it disabled so siblings skip it when chaining bases. */
            dev->IsDisabled = NV_TRUE;
            dev->mem_limit = 0;
            dev->io_limit = 0;
            dev->prefetch_limit = 0;
            dev->mem_base = 0;
            dev->io_base = 0;
            dev->prefetch_base = 0;
            /* No valid child dev behind this bridge. Just return */
            return;
        }

        dev->mem_limit = child_dev->mem_limit;
        dev->io_limit = child_dev->io_limit;
        dev->prefetch_limit = child_dev->prefetch_limit;

        /* Now program the bridge address filtering registers */

        if (dev->isRootPort)
        {
            /* Root port is handled differently just return */
            return;
        }

        /* Program the non-prefetchable memory base and limit for the bridge
         * (both in 1MB units, hence the >> 20). */
        data = 0;
        data = NVPCIE_FLD_SET_DRF_NUM(RP, MEM_BL, MEM_BASE, dev->mem_base >> 20, data);
        data = NVPCIE_FLD_SET_DRF_NUM(RP, MEM_BL, MEM_LIMIT, dev->mem_limit >> 20, data);
        NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
            NV_PROJ__PCIE2_RP_MEM_BL, (NvU8 *)&data, 4);

        /* Program the prefetchable memory base and limit for the bridge. */
        NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Read,
            NV_PROJ__PCIE2_RP_PRE_BL, (NvU8 *)&data, 4);
        if ((NVPCIE_DRF_VAL(RP, PRE_BL, B64BIT, data)) == 1)
        {
            is64bit = NV_TRUE;
        }

        data = 0;
        data = NVPCIE_FLD_SET_DRF_NUM(RP, PRE_BL, PREFETCH_MEM_BASE, dev->prefetch_base >> 20, data);
        data = NVPCIE_FLD_SET_DRF_NUM(RP, PRE_BL, PREFETCH_MEM_LIMIT, dev->prefetch_limit >> 20, data);
        if (is64bit)
        {
            data = NVPCIE_FLD_SET_DRF_DEF(RP, PRE_BL, L64BIT, YES, data);
            data = NVPCIE_FLD_SET_DRF_DEF(RP, PRE_BL, B64BIT, YES, data);
        }
        NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
            NV_PROJ__PCIE2_RP_PRE_BL, (NvU8 *)&data, 4);
        if (is64bit)
        {
            /* Everything is placed in 32-bit space: upper dwords are 0. */
            data = 0;
            NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
                NV_PROJ__PCIE2_RP_PRE_LU32, (NvU8 *)&data, 4);
            NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
                NV_PROJ__PCIE2_RP_PRE_BU32, (NvU8 *)&data, 4);
        }

        /* Update the control register to enable memory/io/bus-mastering */
        NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Read,
            NV_PROJ__PCIE2_RP_DEV_CTRL, (NvU8 *)&data, 2);
        data = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, IO_SPACE, ENABLED, data);
        data = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, MEMORY_SPACE, ENABLED, data);
        data = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, BUS_MASTER, ENABLED, data);
        data = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, INTR_DISABLE, NO, data);
        data = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, SERR, ENABLED, data);
        NvRmReadWriteConfigSpace(rm, dev->bus, NvRmPcieAccessType_Write,
            NV_PROJ__PCIE2_RP_DEV_CTRL, (NvU8 *)&data, 2);
    }
    return;
}
+
+/* Dump the PCIe device tree rooted at 'dev' to the debug console.
+ * Traversal is pre-order: a node is printed before its children, and a
+ * node's subtree is printed before its next sibling. Siblings are walked
+ * iteratively so recursion depth is bounded by tree depth only. */
+static
+void pcie_buswalk(NvRmDeviceHandle rm, NvRmPciDevice *dev)
+{
+    for (; dev != NULL; dev = dev->next)
+    {
+        NvU32 bar;
+
+        NvOsDebugPrintf("PCIe device/bridge\n");
+        NvOsDebugPrintf(" Id = 0x%x bus = 0x%x\n", dev->id, dev->bus);
+        if (dev->IsDisabled)
+        {
+            NvOsDebugPrintf(" Device/bridge disabled \n");
+        }
+        else
+        {
+            NvOsDebugPrintf(" mem_base = 0x%x mem_limit = 0x%x\n",
+                dev->mem_base, dev->mem_limit);
+            NvOsDebugPrintf(" prefetch_base = 0x%x prefetch_limit = 0x%x\n",
+                dev->prefetch_base, dev->prefetch_limit);
+            NvOsDebugPrintf(" io_base = 0x%x io_limit = 0x%x\n",
+                dev->io_base, dev->io_limit);
+            /* Only report BARs that were actually assigned something. */
+            for (bar = 0; bar < 6; bar++)
+            {
+                if ((dev->bar_base[bar]) || (dev->bar_size[bar]) || (dev->bar_type[bar]))
+                    NvOsDebugPrintf(" bar(%d) base = 0x%x size = 0x%x type = %d\n",
+                        bar, dev->bar_base[bar], dev->bar_size[bar], dev->bar_type[bar]);
+            }
+        }
+
+        if (dev->child != NULL)
+            pcie_buswalk(rm, dev->child);
+    }
+}
+
+
+/* Bring up every root port on the chip (this hardware has exactly two). */
+void pcie_businit(NvRmDeviceHandle hRm)
+{
+    int rpindex;
+
+    for (rpindex = 0; rpindex < 2; rpindex++)
+        pcie_businitx(hRm, rpindex);
+}
+
+/* Initialize a single PCIe root port: allocate its device node, program the
+ * bus-number registers, enumerate the bus behind it, carve out its half of
+ * the memory/prefetch apertures, assign resources, and write the shadowed
+ * root-port config space back to hardware.
+ *
+ * rpindex selects root port 0 or 1; anything else asserts and bails out.
+ * NOTE: not re-entrant (uses file-scope shadow arrays and s_rp0). */
+void pcie_businitx(NvRmDeviceHandle rm, int rpindex)
+{
+    //note, this function is not re-entrant.
+    NvRmPciDevice *rp=NULL;
+    NvU32 data0;
+    //some constants, depending on which bus we're on
+    const NvU32 pcieregtype_cfgx= (rpindex) ? PcieRegType_CFG1 : PcieRegType_CFG0;
+    const NvU32 rootport_x_bus= (rpindex) ? ROOTPORT_1_BUS : ROOTPORT_0_BUS;
+    const NvU32 rootport_x_subbus= (rpindex) ? ROOTPORT_1_SUBBUS : ROOTPORT_0_SUBBUS;
+    NvU32 *pciconfigx= (rpindex) ? s_pciConfig1 : s_pciConfig0;
+
+    switch(rpindex) {
+    case 0:
+    case 1:
+        break;
+    default:
+        //if there's more than two root ports, various
+        //parts of this function will need to be fixed
+        NV_ASSERT(!"Bad root port index");
+        goto fail;
+    }
+
+    //is this root port present?
+    if((rpindex==0) && (s_PcieRootPort0Present == NV_FALSE)) goto fail;
+    if((rpindex==1) && (s_PcieRootPort1Present == NV_FALSE)) goto fail;
+
+    rp=pcie_allocDevice(rm);
+
+    //if there's already a root port allocated, attach to that one's "next"
+    //there shouldn't be anything attached to it already
+    //otherwise, this is the first root port
+    /* NOTE(review): the braceless nested if/else below relies on each 'else'
+     * binding to the nearest 'if' -- correct as written, but fragile. */
+    if(s_rp0)
+        if(s_rp0->next)
+            NV_ASSERT(!("next for s_rp0\n")); //shouldn't be possible
+        else
+            s_rp0->next=rp;
+    else
+        s_rp0 = rp;
+
+    /* Identify this node as a root port and record its bus topology. */
+    rp->isRootPort = NV_TRUE;
+    rp->sec_bus = rootport_x_bus + 1;
+    rp->sub_bus = rootport_x_subbus;
+    rp->bus = rootport_x_bus;
+    rp->id = pcie_regr(rm, pcieregtype_cfgx, NV_PROJ__PCIE2_RP_DEV_ID);
+
+    /* Read the config space into the SW shadow, configure the config and then
+     * write back */
+    pcie_ReadRPConfig(rm, (NvU8 *)pciconfigx, sizeof(s_pciConfig0), 0, rpindex);
+
+    /* Set the root port bus numbers to maximum. */
+    data0 = pciconfigx[NV_PROJ__PCIE2_RP_BN_LT/4];
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, BN_LT, PRI_BUS_NUMBER, rp->bus, data0);
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, BN_LT, SEC_BUS_NUMBER, rp->sec_bus, data0);
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, BN_LT, SUB_BUS_NUMBER, rp->sub_bus, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_BN_LT/4] = data0;
+    pcie_WriteRPConfig(rm, (NvU8 *)&data0, 4, NV_PROJ__PCIE2_RP_BN_LT, rpindex);
+
+    /* Scan the bus and assign the bus numbers. */
+    pcie_scanbus(rm, rp);
+
+    /* fix the sub-ordinate bus number for the root port */
+    data0 = pciconfigx[NV_PROJ__PCIE2_RP_BN_LT/4];
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, BN_LT, SUB_BUS_NUMBER, rp->sub_bus, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_BN_LT/4] = data0;
+    pcie_WriteRPConfig(rm, (NvU8 *)&data0, 4, NV_PROJ__PCIE2_RP_BN_LT, rpindex);
+
+    //each port gets half the range
+    if(s_rp0 == rp) {
+        /* First root port: lower half of both memory apertures. */
+        rp->mem_base = FPCI_NON_PREFETCH_MEMORY_OFFSET;
+        rp->mem_max = FPCI_NON_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE/2;
+        rp->prefetch_base = FPCI_PREFETCH_MEMORY_OFFSET;
+        rp->prefetch_max = FPCI_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_PREFETCH_MEMORY_SIZE/2;
+        rp->io_base = 0x16; //not starting at 0 - some drivers have issue with 0
+    } else {
+        /* Second root port: upper half; IO continues where port 0 stopped. */
+        rp->mem_base = FPCI_NON_PREFETCH_MEMORY_OFFSET + (NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE/2);
+        rp->mem_max = FPCI_NON_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE;
+        rp->prefetch_base = FPCI_PREFETCH_MEMORY_OFFSET + (NVRM_PCIE_PREFETCH_MEMORY_SIZE/2);
+        rp->prefetch_max = FPCI_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_PREFETCH_MEMORY_SIZE;
+        rp->io_base = s_rp0->io_limit;
+    }
+
+    pcie_allocateResoruces(rm, rp);
+
+    /* Memory/IO/prefetch address filtering */
+    data0 = pciconfigx[NV_PROJ__PCIE2_RP_IO_BL_SS/4];
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, IO_BL_SS, IO_BASE_SUPPORT, 32, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, IO_BL_SS, IO_BASE, ADDRESS_0, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, IO_BL_SS, IO_LIMIT_SUPPORT, 32, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, IO_BL_SS, IO_LIMIT, ADDRESS_256, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_IO_BL_SS/4] = data0;
+    pciconfigx[NV_PROJ__PCIE2_RP_IO_BL_U16/4] = 0;
+    /* Bridge windows are programmed in 1MB (>>20) granularity. */
+    data0 = 0;
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, MEM_BL, MEM_BASE, rp->mem_base >> 20, data0);
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, MEM_BL, MEM_LIMIT, (rp->mem_limit-1) >> 20, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_MEM_BL/4] = data0;
+    data0 = 0;
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, PRE_BL, B64BIT, YES, data0);
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, PRE_BL, PREFETCH_MEM_BASE, rp->prefetch_base >> 20, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, PRE_BL, L64BIT, YES, data0);
+    data0 = NVPCIE_FLD_SET_DRF_NUM(RP, PRE_BL, PREFETCH_MEM_LIMIT, (rp->prefetch_limit-1) >> 20, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_PRE_BL/4] = data0;
+    pciconfigx[NV_PROJ__PCIE2_RP_PRE_BU32/4] = 0x0;
+    pciconfigx[NV_PROJ__PCIE2_RP_PRE_LU32/4] = 0x0;
+
+    /* command register */
+    data0 = 0;
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, IO_SPACE, ENABLED, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, MEMORY_SPACE, ENABLED, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, BUS_MASTER, ENABLED, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, INTR_DISABLE, NO, data0);
+    data0 = NVPCIE_FLD_SET_DRF_DEF(RP, DEV_CTRL, SERR, ENABLED, data0);
+    pciconfigx[NV_PROJ__PCIE2_RP_DEV_CTRL/4] = data0;
+
+    /* Flush the whole shadowed config back to the root port. */
+    pcie_WriteRPConfig(rm, (NvU8 *)pciconfigx, sizeof(s_pciConfig0),0, rpindex);
+
+fail:
+    return;
+}
+
+
+/* For now implementing a static address translation. One can do some complicated
+ * dynamic address mapping, but AP20 use case, doesn't need one.
+ *
+ * It would be non-trivial to support dynamic memory mapping.
+ *
+ * */
+/* Translate an FPCI bus address range to the equivalent CPU (AXI) physical
+ * address using the fixed, static aperture layout. Returns 0 when the
+ * requested range does not fall entirely inside either memory aperture. */
+NvRmPhysAddr NvRmMapPciMemory(
+    NvRmDeviceHandle hDeviceHandle,
+    NvRmPciPhysAddr mem,
+    NvU32 size )
+{
+    const NvRmPciPhysAddr mem_base = FPCI_NON_PREFETCH_MEMORY_OFFSET;
+    const NvRmPciPhysAddr mem_limit = FPCI_NON_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE;
+    const NvRmPciPhysAddr prefetch_base = FPCI_PREFETCH_MEMORY_OFFSET;
+    const NvRmPciPhysAddr prefetch_limit = FPCI_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_PREFETCH_MEMORY_SIZE;
+
+    /* Non-prefetchable window. */
+    if ((mem >= mem_base) && ((mem + size) <= mem_limit))
+        return s_pciePhysical + (NvRmPhysAddr)((mem - mem_base) + NVRM_PCIE_NON_PREFETCH_MEMORY_OFFSET);
+
+    /* Prefetchable window. */
+    if ((mem >= prefetch_base) && ((mem + size) <= prefetch_limit))
+        return s_pciePhysical + (NvRmPhysAddr)((mem - prefetch_base) + NVRM_PCIE_PREFETCH_MEMORY_OFFSET);
+
+    /* Range is outside both apertures. */
+    return 0;
+}
+
+/* Counterpart of NvRmMapPciMemory. The translation is static (no per-map
+ * state is created), so there is nothing to tear down here. */
+void NvRmUnmapPciMemory(
+    NvRmDeviceHandle hDeviceHandle,
+    NvRmPhysAddr mem,
+    NvU32 size )
+{
+    return;
+}
+
+/* Register a semaphore to be signalled when a legacy (INTx/sideband) PCIe
+ * interrupt arrives for the given device.
+ *
+ * @param rm                  RM device handle (unused for lookup itself).
+ * @param function_device_bus Encoded function/device/bus of the target device.
+ * @param sem                 Semaphore to signal from the interrupt handler.
+ * @param InterruptEnable     Currently unused placeholder.
+ * @return NvSuccess, NvError_DeviceNotFound if the device was never
+ *         enumerated, or NvError_InsufficientMemory when the handler table
+ *         is full.
+ */
+NvError NvRmRegisterPcieLegacyHandler(
+    NvRmDeviceHandle rm,
+    NvU32 function_device_bus,
+    NvOsSemaphoreHandle sem,
+    NvBool InterruptEnable)
+{
+    NvError retval=NvSuccess;
+    NvRmPciDevice *device;
+    NvU32 index;
+    static NvBool initialized=NV_FALSE;
+
+    device = pcie_GetDevice(function_device_bus);
+    if (device == NULL)
+    {
+        retval=NvError_DeviceNotFound;
+        goto fail;
+    }
+
+    /* Lazily clear the handler table on first use.
+     * BUG FIX: the loop bound was 'index <= MAX_LEGACY_HANDLERS', which
+     * wrote one element past the end of LegacyHandlers[]. */
+    if(!initialized)
+    {
+        for(index=0; index<MAX_LEGACY_HANDLERS; index++)
+        {
+            LegacyHandlers[index].sem=0;
+        }
+        initialized=NV_TRUE;
+    }
+
+    /* Find the first free slot. */
+    for(index=0; index<MAX_LEGACY_HANDLERS; index++)
+    {
+        if(LegacyHandlers[index].sem==0) break;
+    }
+
+    if(index>=MAX_LEGACY_HANDLERS)
+    {
+        /* BUG FIX: previously fell through to 'fail' with retval still
+         * NvSuccess, silently reporting success to the caller. */
+        NvOsDebugPrintf("ran out of legacy interrupt handles!\n");
+        retval = NvError_InsufficientMemory;
+        goto fail;
+    }
+    LegacyHandlers[index].sem = sem;
+
+    if (InterruptEnable)
+    {
+        //do something here?
+    }
+
+fail:
+    return retval;
+}
+
+/* max of 256 possible MSI handlers, but normally much less*/
+/* Register a semaphore for MSI vector 'index' of the given device, and
+ * optionally set the MSI-enable bit in the device's MSI capability.
+ * Returns NvError_DeviceNotFound if the device was never enumerated or has
+ * no MSI capability. */
+NvError NvRmRegisterPcieMSIHandler(
+    NvRmDeviceHandle rm,
+    NvU32 function_device_bus,
+    NvU32 index,
+    NvOsSemaphoreHandle sem,
+    NvBool InterruptEnable)
+{
+    //#1 - how to uniquely identify the device?
+    //And which MSI to handle?
+    //does this actually need the rm handle? There can't be multiple instances.
+
+    //device is function_device_bus
+    NvError retval=NvSuccess;
+
+    NvU8 offset;
+    NvU16 num_messages;
+    NvU16 baseIndex;
+    NvRmPciDevice *device;
+
+    device = pcie_GetDevice(function_device_bus);
+    if (device == NULL)
+    {
+        retval=NvError_DeviceNotFound;
+        goto fail;
+    }
+
+    /* Locate the MSI capability; an offset of 0 means the device has none. */
+    offset = pcie_MsiGetOffset(rm, device);
+    //first make sure there's msi's for this device.
+    if (!offset) {
+        retval = NvError_DeviceNotFound;
+        goto fail;
+    }
+
+    /* Message Control register is at capability offset + 2. */
+    NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Read,
+        offset+2, (NvU8 *)&num_messages, 2);
+
+    /* Bits 6:4 hold log2 of the enabled message count. */
+    num_messages = (num_messages & 0x0070) >> 4;
+    num_messages = 1 << num_messages;
+
+    /* MSI interrupts are allocated during enumeration. Find the vector that is
+     * mapped to this device */
+    /* Message Data lives at offset+12 for 64-bit capable MSI, offset+8
+     * otherwise. */
+    if (pcie_Is64BitMsi(rm, device, offset))
+        NvRmReadWriteConfigSpace(rm, device->bus,
+            NvRmPcieAccessType_Read, offset+12, (NvU8 *)&baseIndex, 2);
+    else
+        NvRmReadWriteConfigSpace(rm, device->bus,
+            NvRmPcieAccessType_Read, offset+8, (NvU8 *)&baseIndex, 2);
+
+    /* NOTE(review): no bounds check that index + baseIndex stays inside
+     * MSIHandlers[] -- confirm enumeration-time allocation guarantees it. */
+    index += baseIndex;
+    MSIHandlers[index].sem = sem;
+
+    if (InterruptEnable) {
+        NvU16 message_control;
+
+        NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Read,
+            offset+2, (NvU8 *)&message_control, 2);
+
+        /* Keep bits 7:1, then set bit 0 (MSI enable). */
+        message_control &= 0x00fe;
+        message_control |= 1;
+
+        NvRmReadWriteConfigSpace(rm, device->bus, NvRmPcieAccessType_Write,
+            offset+2, (NvU8 *)&message_control, 2);
+    }
+
+fail:
+    return retval;
+}
+
+/* Second-level handler for non-MSI PCIe interrupts. Decodes the AFI
+ * interrupt code, reports/dispatches it, clears the code register so the
+ * next interrupt can be latched, and re-arms the interrupt. */
+static
+void NvRmPrivHandlePcieInterrupt(void *arg)
+{
+    NvU32 intr_info, intr_extended_info;
+    NvRmDeviceHandle rm = (NvRmDeviceHandle)arg;
+
+    intr_info = pcie_regr(rm, PcieRegType_AFI, AFI_INTR_CODE_0);
+    intr_info = NV_DRF_VAL(AFI, INTR_CODE, INT_CODE, intr_info);
+    intr_extended_info = pcie_regr(rm, PcieRegType_AFI, AFI_INTR_SIGNATURE_0);
+    switch (intr_info)
+    {
+    /* Interrupt code */
+    case 6:
+    {
+        /* Sideband (legacy INTx) message: the source cannot be identified
+         * here, so every registered legacy handler is signalled. */
+        NvBool match=NV_FALSE;
+        NvU32 i;
+        for(i=0; i<MAX_LEGACY_HANDLERS; i++) {
+            if(LegacyHandlers[i].sem) {
+                NvOsSemaphoreSignal(LegacyHandlers[i].sem);
+                match=NV_TRUE;
+            }
+        }
+        if(match==NV_FALSE)
+            NvOsDebugPrintf("Got an unhandled sideband message interrupt 0x%x\n", intr_extended_info);
+    }
+    break;
+    case 1:
+        /* SLVERR */
+        NvOsDebugPrintf("AXI Slave error interrupt\n");
+        break;
+    case 2:
+        /* DECERR */
+        NvOsDebugPrintf("AXI decode error interrupt\n");
+        break;
+    case 3:
+        /* PCIE target abort */
+        NvOsDebugPrintf("PCIE target abort interrupt\n");
+        break;
+    case 4:
+        /* PCIE master abort */
+        // Don't print this, as this error is a common error during
+        // enumeration.
+        // NvOsDebugPrintf("PCIE master abort interrupt\n");
+        break;
+    case 5:
+        /* Bufferable write to non-posted write */
+        NvOsDebugPrintf("Invalid write interrupt: Bufferable write to non-posted region\n");
+        break;
+    case 7:
+        /* Response address mapping error */
+        NvOsDebugPrintf("PCIE response decoding error interrupt\n");
+        break;
+    case 8:
+        /* Response address mapping error */
+        NvOsDebugPrintf("AXI response decoding error interrupt\n");
+        break;
+    case 9:
+        /* PCIE timeout */
+        NvOsDebugPrintf("PCIE transcation timeout\n");
+        break;
+    default:
+        break;
+    }
+    /* Clear the interrupt code register to sample the next interrupt */
+    pcie_regw(rm, PcieRegType_AFI, AFI_INTR_CODE_0, 0);
+
+    NvRmInterruptDone(s_pcieInterruptHandle);
+    return;
+}
+
+
+/* Second-level handler for PCIe MSI interrupts. Snapshots the eight AFI MSI
+ * vector registers (256 vectors total), then for each pending vector:
+ * acknowledges it in hardware and signals the semaphore registered for that
+ * vector, if any. */
+void NvRmPrivHandlePcieMSI(void *arg)
+{
+    NvU32 vecs[8];
+    int i;
+    NvRmDeviceHandle hRm=arg;
+
+    //figure out which MSI we got, and then call the handler
+
+    vecs[0]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC0_0);
+    vecs[1]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC1_0);
+    vecs[2]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC2_0);
+    vecs[3]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC3_0);
+    vecs[4]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC4_0);
+    vecs[5]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC5_0);
+    vecs[6]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC6_0);
+    vecs[7]=pcie_regr(hRm, PcieRegType_AFI, AFI_MSI_VEC7_0);
+
+    for (i=0; i<8; i++)
+    {
+        while (vecs[i])
+        {
+            NvU32 bit;
+            NvU32 index;
+
+            /* Service the highest pending bit first. */
+            bit = (31 - CountLeadingZeros(vecs[i]));
+            index = (i * 32) + bit;
+
+            // clear the MSI bit in the register and in the shadow
+            vecs[i] &= ~(1<<bit);
+            pcie_regw(hRm, PcieRegType_AFI, AFI_MSI_VEC0_0 + i * 4, 0x01 << bit);
+
+            if (MSIHandlers[index].sem)
+            {
+                /* BUG FIX: was MSIHandlers[i].sem -- indexed by register
+                 * number instead of vector number, so the wrong semaphore
+                 * was signalled for every vector >= 32 (and for any vector
+                 * whose bit position differs from its register index). */
+                NvOsSemaphoreSignal(MSIHandlers[index].sem);
+            } else
+            {
+                NvOsDebugPrintf("unhandled MSI %08x %08x\n", i,bit);
+            }
+        }
+    }
+    NvRmInterruptDone(s_pcieMSIHandle);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie_private.h b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie_private.h
new file mode 100644
index 000000000000..21761afb6bb4
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_pcie_private.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef INCLUDED_AP20RM_PCIE_PRIVATE_H
+#define INCLUDED_AP20RM_PCIE_PRIVATE_H
+
+#include "nvcommon.h"
+#include "ap20/dev_ap_pcie2_root_port.h"
+#include "ap20/dev_ap_pcie2_pads.h"
+#include "ap20/arafi.h"
+#include "nvrm_drf.h"
+
+
+/*
+ * AXI address map for the PCIe aperture. AP20, defines 1GB in the AXI address
+ * map for PCIe.
+ *
+ * That address space is split into different regions, with sizes and offsets
+ * as follows.
+ *
+ * 0x8000_0000 to 0x80ff_ffff - Register space 16MB.
+ * 0x8100_0000 to 0x81ff_ffff - Config space 16MB.
+ * 0x8200_0000 to 0x82ff_ffff - Extended config space 16MB.
+ * 0x8300_0000 to 0x83ff_ffff - Downstream IO space
+ * ... Will be filled with other BARS like MSI/upstream IO etc.
+ * 0x9000_0000 to 0x9fff_ffff - non-prefetchable memory aperture
+ * 0xa000_0000 to 0xbfff_ffff - Prefetchable memory aperture
+ *
+ * Config and Extended config sizes are chosen to support a maximum of 256
+ * devices, which is good enough for all the AP20 use cases.
+ *
+ * */
+#define NVRM_PCIE_REGISTER_APERTURE_SIZE 0x1000000UL
+#define NVRM_PCIE_CONFIG_OFFSET NVRM_PCIE_REGISTER_APERTURE_SIZE
+#define NVRM_PCIE_CONFIG_SIZE 0x1000000UL
+/* BUG FIX: the composite offset macros below were unparenthesized sums;
+ * any use next to a higher-precedence operator (e.g. OFFSET * 2 or
+ * OFFSET / 4) would expand incorrectly. Wrap each expansion in parens. */
+#define NVRM_PCIE_EXTENDED_CONFIG_OFFSET (NVRM_PCIE_CONFIG_SIZE + NVRM_PCIE_CONFIG_OFFSET)
+#define NVRM_PCIE_EXTENDED_CONFIG_SIZE 0x1000000UL
+#define NVRM_PCIE_DOWNSTREAM_IO_OFFSET (NVRM_PCIE_EXTENDED_CONFIG_SIZE + NVRM_PCIE_EXTENDED_CONFIG_OFFSET)
+#define NVRM_PCIE_DOWNSTREAM_IO_SIZE 0x100000UL
+/*... some room for the other BARs */
+#define NVRM_PCIE_NON_PREFETCH_MEMORY_OFFSET 0x10000000UL
+#define NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE 0x10000000UL
+#define NVRM_PCIE_PREFETCH_MEMORY_OFFSET (NVRM_PCIE_NON_PREFETCH_MEMORY_OFFSET + NVRM_PCIE_NON_PREFETCH_MEMORY_SIZE)
+#define NVRM_PCIE_PREFETCH_MEMORY_SIZE 0x20000000UL
+
+/*
+ * PCI address map for memory mapped devices. Still using 32-bit aperture.
+ *
+ * 1GB for the system memory.
+ * 1GB for the non pre-fetchable memory
+ * 1GB for the pre-fetchable memory
+ *
+ * Though all the PCI devices get mapped to these address ranges, ARM cannot
+ * see the entire non-prefetchable/prefetchable memory, as the AXI address map
+ * only has 768MB for the total non-prefetchable/prefetchable memory. See the
+ * above defines for that address map.
+ */
+#define FPCI_SYSTEM_MEMORY_OFFSET 0x0UL
+#define FPCI_SYSTEM_MEMORY_SIZE 0x40000000UL
+#define FPCI_NON_PREFETCH_MEMORY_OFFSET (FPCI_SYSTEM_MEMORY_OFFSET + FPCI_SYSTEM_MEMORY_SIZE)
+#define FPCI_NON_PREFETCH_MEMORY_SIZE 0x40000000UL
+#define FPCI_PREFETCH_MEMORY_OFFSET (FPCI_NON_PREFETCH_MEMORY_OFFSET + FPCI_NON_PREFETCH_MEMORY_SIZE)
+#define FPCI_PREFETCH_MEMORY_SIZE 0x40000000UL
+
+/* Lower 16K of the PCIE aperture has root port registers. There are 4 groups
+ * of root port registers.
+ *
+ * 1. AFI registers - AFI is a wrapper between PCIE and ARM AXI bus. These
+ * registers define the address translation registers, interrupt registers and
+ * some configuration (a.k.a CYA) registers.
+ * 2. PAD registers - PAD control registers which are inside the PCIE CORE.
+ * 3. Configuration 0 and Configuration 1 registers - These registers are PCIe
+ * configuration registers of Root port 0 and root port 1.
+ *
+ * Check the PcieRegType enumeration for the list of Registers banks inside the
+ * PCIE aperture.
+ *
+ * */
+#define NV_PCIE_AXI_AFI_REGS_OFSET 0x3800
+#define NV_PCIE_AXI_PADS_OFSET 0x3000
+#define NV_PCIE_AXI_RP_T0C0_OFFSET 0x0000
+#define NV_PCIE_AXI_RP_T0C1_OFFSET 0x1000
+
+#define NVRM_PCIE_MAX_DEVICES 256
+
+/* PCIE DRF macros to read and write PRI registers */
+
+/** NVPCIE_DRF_DEF - define a new register value.
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param c defined value for the field
+ */
+#define NVPCIE_DRF_DEF(d,r,f,c) \
+ ((NV_PROJ__PCIE2_##d##_##r##_##f##_##c) << NV_FIELD_SHIFT(NV_PROJ__PCIE2_##d##_##r##_##f))
+
+/** NVPCIE_DRF_NUM - define a new register value.
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param n numeric value for the field
+ */
+#define NVPCIE_DRF_NUM(d,r,f,n) \
+ (((n)& NV_FIELD_MASK(NV_PROJ__PCIE2_##d##_##r##_##f)) << \
+ NV_FIELD_SHIFT(NV_PROJ__PCIE2_##d##_##r##_##f))
+
+/** NVPCIE_DRF_VAL - read a field from a register.
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param v register value
+ */
+#define NVPCIE_DRF_VAL(d,r,f,v) \
+ (((v)>> NV_FIELD_SHIFT(NV_PROJ__PCIE2_##d##_##r##_##f)) & \
+ NV_FIELD_MASK(NV_PROJ__PCIE2_##d##_##r##_##f))
+
+/** NVPCIE_FLD_SET_DRF_NUM - modify a register field.
+
+    @param d register domain (hardware block)
+    @param r register name
+    @param f register field
+    @param n numeric field value
+    @param v register value
+ */
+/* BUG FIX: parenthesize (v) in the expansion -- the unparenthesized form
+ * mis-expands when v is a non-primary expression, and was inconsistent
+ * with NVPCIE_FLD_SET_DRF_DEF below. */
+#define NVPCIE_FLD_SET_DRF_NUM(d,r,f,n,v) \
+    (((v) & ~NV_FIELD_SHIFTMASK(NV_PROJ__PCIE2_##d##_##r##_##f)) | NVPCIE_DRF_NUM(d,r,f,n))
+
+/** NVPCIE_FLD_SET_DRF_DEF - modify a register field.
+
+ @param d register domain (hardware block)
+ @param r register name
+ @param f register field
+ @param c defined field value
+ @param v register value
+ */
+#define NVPCIE_FLD_SET_DRF_DEF(d,r,f,c,v) \
+ (((v) & ~NV_FIELD_SHIFTMASK(NV_PROJ__PCIE2_##d##_##r##_##f)) | \
+ NVPCIE_DRF_DEF(d,r,f,c))
+
+/** NVPCIE_RESETVAL - get the reset value for a register.
+
+ @param d register domain (hardware block)
+ @param r register name
+ */
+#define NVPCIE_RESETVAL(d,r) (d##_##r##_0_RESET_VAL)
+
+/* Major PCIE register banks */
+typedef enum {
+    PcieRegType_AFI,    /* AFI wrapper registers (address translation, interrupts, CYA) */
+    PcieRegType_CFG0,   /* root port 0 configuration registers */
+    PcieRegType_CFG1,   /* root port 1 configuration registers */
+    PcieRegType_PADS,   /* PAD control registers inside the PCIE core */
+    PcieRegType_Force32 = 0x7FFFFFFF,   /* force 32-bit enum storage */
+} PcieRegType;
+
+/* Classification of a PCI BAR / bridge window resource. */
+typedef enum
+{
+    NvRmPciResource_Io,                 /* I/O space */
+    NvRmPciResource_PrefetchMemory,     /* prefetchable memory space */
+    NvRmPciResource_NonPrefetchMemory,  /* non-prefetchable memory space */
+    NvRmPciResource_Force32 = 0x7FFFFFFF,   /* force 32-bit enum storage */
+
+} NvRmPciResource;
+
+
+/* One node of the enumerated PCIe device tree (both devices and bridges). */
+typedef struct NvRmPciDeviceRec
+{
+    /// Bus and device number encoding of the PCIe device/bridge
+    NvU32 bus;
+    /// Secondary bus number. Non-zero only for bridge devices.
+    NvU32 sec_bus;
+    /// Subordinate bus number. Non-zero only for the bridge devices.
+    NvU32 sub_bus;
+    /// Device ID/vendor ID of the PCI device/bridge. upper 16 bits are device
+    /// ID and lower 16 bits are vendor ID.
+    NvU32 id;
+
+    /// Base address registers of PCIe devices.
+    NvU32 bar_base[6];
+    NvU32 bar_size[6];
+    NvRmPciResource bar_type[6];
+
+    /// Assigned I/O window.
+    NvU32 io_base;
+    NvU32 io_limit;
+
+    /// Assigned non-prefetchable memory window.
+    NvU32 mem_base;
+    NvU32 mem_limit;
+    NvU32 mem_max; //highest address available on this device's root port
+
+    /// Assigned prefetchable memory window.
+    NvU32 prefetch_base;
+    NvU32 prefetch_limit;
+    NvU32 prefetch_max; //highest address available on this device's root port
+
+    /// NV_TRUE when the device could not be configured and must be skipped.
+    NvBool IsDisabled;
+
+    /// Tree links: parent bridge, sibling chain, first child.
+    struct NvRmPciDeviceRec *parent;
+    struct NvRmPciDeviceRec *next;
+    struct NvRmPciDeviceRec *prev;
+    struct NvRmPciDeviceRec *child;
+    NvBool isRootPort;
+} NvRmPciDevice;
+
+
+#define MAX_PCI_DEVICES 64
+
+
+NvError NvRmPrivPcieOpen(NvRmDeviceHandle hDeviceHandle);
+void NvRmPrivPcieClose(NvRmDeviceHandle hDeviceHandle);
+
+
+
+#endif // INCLUDED_AP20RM_PCIE_PRIVATE_H
+
diff --git a/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_slink_hw_private.c b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_slink_hw_private.c
new file mode 100644
index 000000000000..d55960a12edf
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/ap20/ap20rm_slink_hw_private.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2008-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**
+ * @file
+ * @brief <b>nVIDIA driver Development Kit:
+ * Private functions implementation for the slink Rm driver</b>
+ *
+ * @b Description: Implements the private functions for the slink hw interface.
+ *
+ */
+
+// hardware includes
+#include "ap20/arslink.h"
+#include "../ap15/rm_spi_slink_hw_private.h"
+#include "nvrm_drf.h"
+#include "nvrm_hardware_access.h"
+#include "nvassert.h"
+#include "nvos.h"
+
+// Enable the hw based chipselect
+#define ENABLE_HW_BASED_CS 0
+
+#define SLINK_REG_READ32(pSlinkHwRegsVirtBaseAdd, reg) \
+ NV_READ32((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))
+#define SLINK_REG_WRITE32(pSlinkHwRegsVirtBaseAdd, reg, val) \
+ do { \
+ NV_WRITE32((((pSlinkHwRegsVirtBaseAdd) + ((SLINK_##reg##_0)/4))), (val)); \
+ } while(0)
+
+
+#define MAX_SLINK_FIFO_DEPTH 32
+
+#define ALL_SLINK_STATUS_CLEAR \
+ (NV_DRF_NUM(SLINK, STATUS, RDY, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_UNF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, TX_OVF, 1) | \
+ NV_DRF_NUM(SLINK, STATUS, RX_OVF, 1))
+
+static void
+SlinkHwSetSignalMode(
+ SerialHwRegisters *pSlinkHwRegs,
+ NvOdmQuerySpiSignalMode SignalMode);
+
+/**
+ * Initialize the slink register.
+ */
+/* Populate the SW shadow of the slink controller registers with their
+ * power-on defaults plus the driver's baseline configuration (master mode,
+ * chip-select policy per ENABLE_HW_BASED_CS). Nothing is written to
+ * hardware here; pRegsBaseAdd is deliberately left NULL. */
+static void
+SlinkHwRegisterInitialize(
+    NvU32 SlinkInstanceId,
+    SerialHwRegisters *pSlinkHwRegs)
+{
+    NvU32 CommandReg1;
+    NvU32 CommandReg2;
+    pSlinkHwRegs->InstanceId = SlinkInstanceId;
+    pSlinkHwRegs->pRegsBaseAdd = NULL;
+    pSlinkHwRegs->RegBankSize = 0;
+    pSlinkHwRegs->HwTxFifoAdd = SLINK_TX_FIFO_0;
+    pSlinkHwRegs->HwRxFifoAdd = SLINK_RX_FIFO_0;
+    pSlinkHwRegs->IsPackedMode = NV_FALSE;
+    pSlinkHwRegs->PacketLength = 1;
+    pSlinkHwRegs->CurrSignalMode = NvOdmQuerySpiSignalMode_Invalid;
+    pSlinkHwRegs->MaxWordTransfer = MAX_SLINK_FIFO_DEPTH;
+    pSlinkHwRegs->IsLsbFirst = NV_FALSE;
+    pSlinkHwRegs->IsMasterMode = NV_TRUE;
+    pSlinkHwRegs->IsNonWordAlignedPackModeSupported = NV_TRUE;
+
+    /* Start from the documented hardware reset values. */
+    CommandReg1 = NV_RESETVAL(SLINK, COMMAND);
+    CommandReg2 = NV_RESETVAL(SLINK, COMMAND2);
+
+#if ENABLE_HW_BASED_CS
+    // Initialize the chip select bits to select the h/w only
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_SW, HARD, CommandReg1);
+
+    // Do not toggle the CS between each packet.
+    // HIGH: CS active between two packets
+    // LOW: CS inactive between two packets
+    CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, CS_ACTIVE_BETWEEN, HIGH, CommandReg2);
+#else
+    // Initialize the chip select bits to select the s/w only
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_SW, SOFT, CommandReg1);
+    // Set chip select to normal high level. (inverted polarity).
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CS_VALUE, HIGH, CommandReg1);
+#endif
+
+    CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, M_S, MASTER, CommandReg1);
+
+    /* NOTE(review): IsIdleDataOutHigh is read here but never assigned in
+     * this function -- assumes the caller pre-set it before calling.
+     * TODO confirm against the caller. */
+    if (pSlinkHwRegs->IsIdleDataOutHigh)
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SDA, DRIVE_HIGH, CommandReg1);
+    else
+        CommandReg1 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SDA, DRIVE_LOW, CommandReg1);
+
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg1;
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = CommandReg2;
+    pSlinkHwRegs->HwRegs.SlinkRegs.Status = NV_RESETVAL(SLINK, STATUS);
+    pSlinkHwRegs->HwRegs.SlinkRegs.DmaControl = NV_RESETVAL(SLINK, DMA_CTL);
+}
+
+/**
+ * Set the signal mode of communication whether this is the mode 0, 1, 2 or 3.
+ */
+/* Program SPI signal mode 0-3 (clock idle level + sampling edge) into the
+ * COMMAND register, write it to hardware, and update the shadow plus
+ * CurrSignalMode. NOTE(review): on an invalid mode the assert fires in
+ * debug builds, but release builds still write the (unchanged) register
+ * and record the invalid mode in CurrSignalMode. */
+static void
+SlinkHwSetSignalMode(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvOdmQuerySpiSignalMode SignalMode)
+{
+    NvU32 CommandReg = pSlinkHwRegs->HwRegs.SlinkRegs.Command1;
+    switch (SignalMode)
+    {
+        case NvOdmQuerySpiSignalMode_0:
+            /* Mode 0: clock idles low, data sampled on the first edge. */
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                DRIVE_LOW, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, FIRST_CLK_EDGE,
+                CommandReg);
+            break;
+
+        case NvOdmQuerySpiSignalMode_1:
+            /* Mode 1: clock idles low, data sampled on the second edge. */
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                DRIVE_LOW, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, SECOND_CLK_EDGE,
+                CommandReg);
+            break;
+
+        case NvOdmQuerySpiSignalMode_2:
+            /* Mode 2: clock idles high, data sampled on the first edge. */
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                DRIVE_HIGH, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, FIRST_CLK_EDGE,
+                CommandReg);
+            break;
+        case NvOdmQuerySpiSignalMode_3:
+            /* Mode 3: clock idles high, data sampled on the second edge. */
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, IDLE_SCLK,
+                DRIVE_HIGH, CommandReg);
+            CommandReg = NV_FLD_SET_DRF_DEF(SLINK, COMMAND, CK_SDA, SECOND_CLK_EDGE,
+                CommandReg);
+            break;
+        default:
+            NV_ASSERT(!"Invalid SignalMode");
+
+    }
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg;
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND, CommandReg);
+    pSlinkHwRegs->CurrSignalMode = SignalMode;
+}
+
+/**
+ * Set the chip select signal level to be default based on device during the
+ * initialization.
+ */
+/* Set the inactive (default) polarity of one chip-select line (0-3) in the
+ * COMMAND register, write it to hardware, and update the shadow. */
+static void
+SlinkHwSetChipSelectDefaultLevelFxn(
+    SerialHwRegisters *pHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    NvU32 CommandReg1 = pHwRegs->HwRegs.SlinkRegs.Command1;
+    NvU32 CSPolVal = (IsHigh)?1:0;
+    /* Each CS line has its own polarity field, so dispatch on the id. */
+    switch (ChipSelectId)
+    {
+        case 0:
+            CommandReg1 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND, CS_POLARITY0,
+                CSPolVal, CommandReg1);
+            break;
+
+        case 1:
+            CommandReg1 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND, CS_POLARITY1,
+                CSPolVal, CommandReg1);
+            break;
+
+        case 2:
+            CommandReg1 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND, CS_POLARITY2,
+                CSPolVal, CommandReg1);
+            break;
+
+        case 3:
+            CommandReg1 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND, CS_POLARITY3,
+                CSPolVal, CommandReg1);
+            break;
+
+        default:
+            NV_ASSERT(!"Invalid ChipSelectId");
+    }
+    pHwRegs->HwRegs.SlinkRegs.Command1 = CommandReg1;
+    SLINK_REG_WRITE32(pHwRegs->pRegsBaseAdd, COMMAND, CommandReg1);
+}
+
+/* Program COMMAND2.FIFO_REFILLS so that chip select stays active for a
+ * transfer of TotalWords words (each refill covers one 32-word FIFO).
+ * NOTE(review): assumes TotalWords is a non-zero multiple of the 32-word
+ * FIFO depth; TotalWords < 32 makes the subtraction underflow to
+ * 0xFFFFFFFF (NvU32) and trips the assert below -- confirm callers
+ * guarantee this precondition. */
+static void
+SlinkHwSetCSActiveForTotalWordsFxn(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 TotalWords)
+{
+    NvU32 CommandReg2 = pSlinkHwRegs->HwRegs.SlinkRegs.Command2;
+    NvU32 Refills =0;
+    Refills = ((TotalWords / MAX_SLINK_FIFO_DEPTH) -1);
+    /* Hardware field holds at most 3 additional refills (4 * 32 words). */
+    NV_ASSERT(Refills <=3);
+    CommandReg2 = NV_FLD_SET_DRF_NUM(SLINK, COMMAND2, FIFO_REFILLS, Refills, CommandReg2);
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND2, CommandReg2);
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = CommandReg2;
+}
+
+/**
+ * Set the chip select signal level.
+ */
+/* Select chip-select line 0-3 as the active slave in COMMAND2 and write it
+ * to hardware. With software-based CS (the !ENABLE_HW_BASED_CS build), the
+ * line's polarity is also driven via the default-level helper first. */
+static void
+SlinkHwSetChipSelectLevel(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 ChipSelectId,
+    NvBool IsHigh)
+{
+    NvU32 CommandReg2 = pSlinkHwRegs->HwRegs.SlinkRegs.Command2;
+
+#if !ENABLE_HW_BASED_CS
+    SlinkHwSetChipSelectDefaultLevelFxn(pSlinkHwRegs, ChipSelectId, IsHigh);
+#endif
+    switch (ChipSelectId)
+    {
+        case 0:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS0, CommandReg2);
+            break;
+
+        case 1:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS1, CommandReg2);
+            break;
+
+        case 2:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS2, CommandReg2);
+            break;
+
+        case 3:
+            CommandReg2 = NV_FLD_SET_DRF_DEF(SLINK, COMMAND2, SS_EN, CS3, CommandReg2);
+            break;
+
+        default:
+            NV_ASSERT(!"Invalid ChipSelectId");
+    }
+    pSlinkHwRegs->HwRegs.SlinkRegs.Command2 = CommandReg2;
+    SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, COMMAND2,
+        pSlinkHwRegs->HwRegs.SlinkRegs.Command2);
+}
+
+/**
+ * Write into the transmit fifo register.
+ * returns the number of words written.
+ */
+/**
+ * Push up to WordRequested words from pTxBuff into the transmit FIFO,
+ * bounded by how many FIFO slots are currently free (per STATUS2).
+ * Returns the number of words actually written.
+ */
+static NvU32
+SlinkHwWriteInTransmitFifo(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 *pTxBuff,
+    NvU32 WordRequested)
+{
+    NvU32 EmptyCount;
+    NvU32 WordsToWrite;
+    NvU32 k;
+
+    /* STATUS2 reports the free slot count of the TX FIFO. */
+    EmptyCount = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS2);
+    EmptyCount = NV_DRF_VAL(SLINK, STATUS2, TX_FIFO_EMPTY_COUNT, EmptyCount);
+
+    WordsToWrite = NV_MIN(WordRequested, EmptyCount);
+    for (k = 0; k < WordsToWrite; k++)
+        SLINK_REG_WRITE32(pSlinkHwRegs->pRegsBaseAdd, TX_FIFO, pTxBuff[k]);
+
+    return WordsToWrite;
+}
+
+/**
+ * Read the data from the receive fifo.
+ * Returns the number of words it read.
+ */
+/**
+ * Drain up to WordRequested words from the receive FIFO into pRxBuff,
+ * bounded by how many words the FIFO currently holds (per STATUS2).
+ * Returns the number of words actually read.
+ */
+static NvU32
+SlinkHwReadFromReceiveFifo(
+    SerialHwRegisters *pSlinkHwRegs,
+    NvU32 *pRxBuff,
+    NvU32 WordRequested)
+{
+    NvU32 FullCount;
+    NvU32 WordsToRead;
+    NvU32 k;
+
+    /* STATUS2 reports the occupied slot count of the RX FIFO. */
+    FullCount = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, STATUS2);
+    FullCount = NV_DRF_VAL(SLINK, STATUS2, RX_FIFO_FULL_COUNT, FullCount);
+
+    WordsToRead = NV_MIN(WordRequested, FullCount);
+    for (k = 0; k < WordsToRead; k++)
+        pRxBuff[k] = SLINK_REG_READ32(pSlinkHwRegs->pRegsBaseAdd, RX_FIFO);
+
+    return WordsToRead;
+}
+
+/**
+ * Initialize the slink interface for the hw access.
+ */
+/* Fill the spi/slink hardware-interface vtable with the v1.1 slink
+ * implementations defined in this file. */
+void NvRmPrivSpiSlinkInitSlinkInterface_v1_1(HwInterface *pSlinkInterface)
+{
+    pSlinkInterface->HwRegisterInitializeFxn = SlinkHwRegisterInitialize;
+    pSlinkInterface->HwSetSignalModeFxn = SlinkHwSetSignalMode;
+    pSlinkInterface->HwSetChipSelectDefaultLevelFxn = SlinkHwSetChipSelectDefaultLevelFxn;
+    pSlinkInterface->HwSetChipSelectLevelFxn = SlinkHwSetChipSelectLevel;
+    pSlinkInterface->HwWriteInTransmitFifoFxn = SlinkHwWriteInTransmitFifo;
+    pSlinkInterface->HwReadFromReceiveFifoFxn = SlinkHwReadFromReceiveFifo;
+    pSlinkInterface->HwSetCSActiveForTotalWordsFxn = SlinkHwSetCSActiveForTotalWordsFxn;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/common/Makefile b/arch/arm/mach-tegra/nvrm/io/common/Makefile
new file mode 100644
index 000000000000..5fd5ff37820a
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/Makefile
@@ -0,0 +1,12 @@
+ccflags-y += -DNV_IS_AVP=0
+ccflags-y += -DNV_OAL=0
+ccflags-y += -DNV_USE_FUSE_CLOCK_ENABLE=0
+ifeq ($(CONFIG_MACH_TEGRA_GENERIC_DEBUG),y)
+ccflags-y += -DNV_DEBUG=1
+else
+ccflags-y += -DNV_DEBUG=0
+endif
+
+obj-y += nvrm_i2c.o
+obj-y += nvrm_gpioi2c.o
+obj-y += nvrm_owr.o
diff --git a/arch/arm/mach-tegra/nvrm/io/common/nvrm_gpioi2c.c b/arch/arm/mach-tegra/nvrm/io/common/nvrm_gpioi2c.c
new file mode 100644
index 000000000000..8dd5d910047b
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/nvrm_gpioi2c.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "nvrm_i2c_private.h"
+#include "nvassert.h"
+
+#define NVRM_SOFT_I2C_ENABLE_PRINTF (0)
+
+#if (NV_DEBUG && NVRM_SOFT_I2C_ENABLE_PRINTF)
+#define I2C_DUMP1(x) NvOsDebugPrintf x
+#define I2C_DUMP(x) NvOsDebugPrintf x
+#else
+#define I2C_DUMP1(x)
+#define I2C_DUMP(x)
+#endif
+
+#define WAIT_USEC(x) NvOsWaitUS(x)
+
+static void I2CSetHigh( NvRmI2cController *c );
+static void I2CStart( NvRmI2cController *c );
+static void I2CStop( NvRmI2cController *c );
+
+static NvU8 I2CReadByte( NvRmI2cController *c );
+static NvError I2CWriteByte( NvRmI2cController *c, NvU8 data);
+
+static NvU8 I2CReadBit( NvRmI2cController *c );
+static NvError I2CWriteBit( NvRmI2cController *c, NvU8 bit);
+
+NV_INLINE static void I2CClockHigh(NvRmI2cController *c);
+NV_INLINE static void I2CClockLow(NvRmI2cController *c);
+NV_INLINE static void I2CDataHigh( NvRmI2cController *c );
+NV_INLINE static void I2CDataLow(NvRmI2cController *c);
+NV_INLINE static void I2CWaitDataHigh(NvRmI2cController *c);
+NV_INLINE static NvU8 I2CDataRead( NvRmI2cController *c );
+
+NvError
+NvRmGpioI2cRead( NvRmI2cController *c,
+ NvU32 slaveAddr,
+ NvU8 *pDataBytes,
+ NvU32 len,
+ NvU32 flags);
+
+NvError
+NvRmGpioI2cWrite( NvRmI2cController *c,
+ NvU32 slaveAddr,
+ NvU8 *pDataBytes,
+ NvU32 len,
+ NvU32 flags);
+
+/**
+ * Run a list of I2C transactions by bit-banging SDA/SCL as GPIO pins.
+ *
+ * Used as the software fallback when the hardware controller cannot
+ * service the request. If I2cPinMap is non-zero the pin handles are
+ * acquired here (and released before returning); otherwise the handles
+ * already stored in the controller are used. Pins are put back into
+ * function (non-GPIO) mode before returning. Processing stops at the
+ * first failing transaction.
+ */
+NvError NvRmGpioI2cTransaction(
+ NvRmI2cController *c,
+ NvU32 I2cPinMap,
+ NvU8 *Data,
+ NvU32 DataLength,
+ NvRmI2cTransactionInfo * Transaction,
+ NvU32 NumOfTransactions)
+{
+ NvU32 i;
+ NvError status = NvSuccess;
+ NvU32 clockPeriod;
+ NvRmGpioPinState val = 0;
+
+ NV_ASSERT(Transaction);
+ NV_ASSERT(Data);
+ /* Exactly one of: pins already acquired, or a pin map to acquire from. */
+ NV_ASSERT((c->hSdaPin && !I2cPinMap) ||
+ (!c->hSdaPin && I2cPinMap));
+
+ /* Convert frequency (kHz) to period in microseconds */
+ clockPeriod = (NvU32)(1000 / c->clockfreq);
+ if (clockPeriod * c->clockfreq < 1000)
+ {
+ /* This is a ceiling operation: round up so the period covers a
+ * full clock cycle. */
+ clockPeriod++;
+ }
+ c->I2cClockPeriod = clockPeriod;
+
+ if (I2cPinMap)
+ {
+ NvU32 scl, sda;
+ if ((c->GetGpioPins)(c, I2cPinMap, &scl, &sda))
+ {
+ /* Upper 16 bits encode the port, lower 16 the pin number. */
+ status = NvRmGpioAcquirePinHandle(c->hGpio, (scl>>16), (scl&0xffff),
+ &c->hSclPin);
+ if(!status)
+ status = NvRmGpioAcquirePinHandle(c->hGpio, (sda>>16), (sda&0xffff),
+ &c->hSdaPin);
+ if(status)
+ {
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSclPin, 1);
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSdaPin, 1);
+ c->hSclPin = 0;
+ c->hSdaPin = 0;
+ return status;
+ }
+ }
+ else
+ return NvError_NotSupported;
+ }
+
+ NV_ASSERT(c->hSclPin && c->hSdaPin);
+
+ I2C_DUMP1(("Clock period = %d", clockPeriod));
+
+ /* Load the output registers with 0: if we drive a pin at all we drive
+ * it low; otherwise we switch the pin to input mode, causing it to be
+ * tristated (pulled high externally). */
+ NvRmGpioWritePins(c->hGpio, &c->hSdaPin, &val, 1);
+ NvRmGpioWritePins(c->hGpio, &c->hSclPin, &val, 1);
+
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_InputData);
+ NvRmGpioConfigPins(c->hGpio, &c->hSclPin, 1, NvRmGpioPinMode_InputData);
+
+ /* No support yet for repeat start */
+ i = 0;
+ while ( i < NumOfTransactions )
+ {
+ if ( Transaction[i].Flags & NVRM_I2C_WRITE )
+ {
+ status = NvRmGpioI2cWrite(c, Transaction[i].Address,
+ Data, Transaction[i].NumBytes, Transaction[i].Flags);
+ }
+ else if ( Transaction[i].Flags & NVRM_I2C_READ )
+ {
+ status = NvRmGpioI2cRead(c, Transaction[i].Address, Data,
+ Transaction[i].NumBytes, Transaction[i].Flags);
+ }
+ Data += Transaction[i].NumBytes;
+ i++;
+
+ if (status != NvSuccess)
+ break;
+ }
+
+ /* Put back the pins in function mode */
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_Function);
+ NvRmGpioConfigPins(c->hGpio, &c->hSclPin, 1, NvRmGpioPinMode_Function);
+
+ if (I2cPinMap)
+ {
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSclPin, 1);
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSdaPin, 1);
+ c->hSclPin = 0;
+ c->hSdaPin = 0;
+ }
+
+ return status;
+}
+
+/**
+ * Bit-banged I2C read of 'len' bytes from the slave at slaveAddr.
+ *
+ * Issues a START, sends the address with the read bit set, then reads
+ * bytes, ACKing all but the last (which is NAKed per I2C convention).
+ * With NVRM_I2C_NOSTOP the bus is released without a STOP so a
+ * follow-on transaction can issue a repeated start.
+ * Returns NvError_I2cDeviceNotFound if the address byte is not ACKed.
+ */
+NvError
+NvRmGpioI2cRead( NvRmI2cController *c,
+ NvU32 slaveAddr,
+ NvU8 *pDataBytes,
+ NvU32 len,
+ NvU32 flags)
+{
+ NV_ASSERT(c->hGpio);
+
+ /* LSB is always 1 for reads */
+ slaveAddr = slaveAddr | 0x1;
+ I2CStart( c );
+
+ if (I2CWriteByte( c, (NvU8)slaveAddr) != NvSuccess)
+ {
+ I2C_DUMP1(("I2CReadPacket : no ACK for the slave address %x", (slaveAddr >> 1)));
+ I2CStop( c );
+ return NvError_I2cDeviceNotFound;
+ }
+
+ while ( len-- )
+ {
+ *pDataBytes++ = I2CReadByte( c );
+
+ /* For all reads except the last byte, master should send the ACK.
+ * For the last byte, it should send the NAK (data line left high). */
+ if (!len)
+ {
+ I2CDataHigh( c );
+ } else
+ {
+ I2CDataLow( c );
+ }
+
+ /* Pulse the clock line */
+ I2CClockHigh( c );
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+ I2CClockLow( c );
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ /* Release the data line */
+ I2CDataHigh( c );
+ }
+
+ if (flags & NVRM_I2C_NOSTOP)
+ {
+ I2CSetHigh( c );
+ } else
+ {
+ I2CStop( c );
+ }
+
+ return NvSuccess;
+}
+
+/**
+ * Bit-banged I2C write of 'len' bytes to the slave at slaveAddr.
+ *
+ * Issues a START, sends the address with the write bit (LSB cleared),
+ * then the payload; each byte must be ACKed by the slave. With
+ * NVRM_I2C_NOSTOP the bus is released without a STOP.
+ * Returns NvError_I2cDeviceNotFound on any missing ACK.
+ */
+NvError
+NvRmGpioI2cWrite( NvRmI2cController *c,
+ NvU32 slaveAddr,
+ NvU8 *pDataBytes,
+ NvU32 len,
+ NvU32 flags)
+{
+ NvError err = NvSuccess;
+
+ NV_ASSERT(c);
+
+ /* LSB cleared selects the write direction. */
+ slaveAddr = slaveAddr & ~0x1;
+
+ I2CStart( c );
+
+ if (I2CWriteByte( c, (NvU8)slaveAddr ) != NvSuccess)
+ {
+ I2C_DUMP1(("I2CWrite : no ACK for the slave address %x", slaveAddr));
+ err = NvError_I2cDeviceNotFound;
+ goto fail;
+ }
+
+ while ( len-- )
+ {
+ if (I2CWriteByte( c, *pDataBytes++ ) != NvSuccess)
+ {
+ I2C_DUMP(("I2CWrite: no ACK for the data\r\n"));
+ err = NvError_I2cDeviceNotFound;
+ goto fail;
+ }
+ }
+
+ if (flags & NVRM_I2C_NOSTOP)
+ {
+ I2CSetHigh(c);
+ } else
+ /* NOTE: the fail label deliberately sits on the else branch so the
+ * error paths above fall into the I2CStop() block, ensuring a STOP
+ * is always generated after a failed transfer. */
+fail:
+ {
+ I2CStop( c );
+ }
+ return err;
+}
+
+/* Release both bus lines (tristate SDA, then SCL) without generating a
+ * STOP condition; used for the NVRM_I2C_NOSTOP case. */
+static void
+I2CSetHigh( NvRmI2cController *c )
+{
+ I2CWaitDataHigh( c );
+ I2CClockHigh( c );
+}
+
+/* Generate an I2C START: pull SDA low while SCL is still high, then pull
+ * SCL low. Assumes the bus is idle (both lines released/high) on entry. */
+static void
+I2CStart( NvRmI2cController *c )
+{
+
+ I2CDataLow( c );
+ I2CClockLow( c );
+
+}
+
+/* Generate an I2C STOP: with SDA held low, release SCL high, then
+ * release SDA high (low-to-high SDA transition while SCL is high). */
+static void
+I2CStop( NvRmI2cController *c )
+{
+
+ I2CDataLow( c );
+ I2CClockHigh( c );
+ I2CDataHigh( c );
+
+}
+
+/* Clock in one byte from the bus, MSB first (eight calls to I2CReadBit).
+ * The caller is responsible for sending the ACK/NAK afterwards. */
+static NvU8
+I2CReadByte( NvRmI2cController *c )
+{
+ int ctr;
+ NvU8 data;
+
+
+ data = 0;
+ for ( ctr = 0; ctr < 8; ctr++ )
+ {
+ data = (data << 1) | I2CReadBit( c );
+ }
+
+ return data;
+}
+
+/**
+ * Clock one byte out on the bus, MSB first, then sample the slave's
+ * ACK on the ninth clock pulse.
+ * Returns NvSuccess on ACK, NvError_I2cDeviceNotFound if the slave did
+ * not pull SDA low (NAK).
+ */
+static NvError
+I2CWriteByte( NvRmI2cController *c,
+ NvU8 data )
+{
+ /* Was declared NvU32; use the NvError type the function returns. */
+ NvError err = NvSuccess;
+ NvU32 SDA = 0;
+ NvU8 ctr, bit;
+
+ for ( ctr = 0; ctr < 8; ctr++ )
+ {
+ bit = (data >> (7 - ctr)) & 0x01;
+ (void)I2CWriteBit( c, bit );
+ }
+
+ /* Wait for ACK from slave i.e. tristate the Data and pulse the clock
+ * and check if the data line is driven low during the clock high stage.
+ */
+ I2CDataHigh( c );
+ I2CClockHigh( c );
+
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ SDA = I2CDataRead( c );
+ if (SDA)
+ {
+ /* Line stayed high: no device acknowledged the byte. */
+ err = NvError_I2cDeviceNotFound;
+ }
+
+ I2CClockLow( c );
+
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ return err;
+}
+
+/* Sample one bit from SDA: release SDA, raise SCL, wait half a period,
+ * read SDA, then lower SCL and wait the other half period. */
+static NvU8
+I2CReadBit( NvRmI2cController *c )
+{
+ NvU8 SDA = 0;
+
+ I2CDataHigh( c ); // DATA set to high (released) first
+ I2CClockHigh( c );
+
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ SDA = I2CDataRead( c );
+
+ I2CClockLow( c );
+
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ return SDA;
+}
+
+/* Drive one bit on SDA and pulse SCL for a full clock period.
+ * Always returns NvSuccess (kept NvError for interface symmetry). */
+static NvError
+I2CWriteBit( NvRmI2cController *c,
+ NvU8 bit )
+{
+ if ( bit & 0x1 )
+ I2CDataHigh( c );
+ else
+ I2CDataLow( c );
+
+ I2CClockHigh( c );
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+ I2CClockLow( c );
+ WAIT_USEC( (c->I2cClockPeriod + 1) / 2 );
+
+ return NvSuccess;
+}
+
+/* Release SCL (tristate via input mode, relying on the external pull-up)
+ * and honour slave clock stretching: poll until the slave releases SCL
+ * or the controller timeout (c->timeout ms, counted in clock periods)
+ * expires. NOTE(review): a timeout is silent — the function returns
+ * without reporting the stuck-low clock line. */
+static void
+I2CClockHigh( NvRmI2cController *c )
+{
+ // The scheme is to make SCL pin in tri-state, thus depends on
+ // outside pull-up to generate High condition. To be in this
+ // tri-state, enable SCL pin with IN direction. Then, always
+ // clear the latched SDA and SCL bits in the register in preparation
+ // for any next switching to Data Low condition (pin direction changed
+ // to OUT).
+ NvU32 timeout = c->timeout * 1000 / c->I2cClockPeriod ;
+ NvU32 inout;
+
+ NvRmGpioConfigPins(c->hGpio, &c->hSclPin, 1, NvRmGpioPinMode_InputData);
+
+ // check whether slave doesn't hold SCL low
+ // if so, wait for certain timeout for release by slave
+ do
+ {
+ WAIT_USEC( c->I2cClockPeriod );
+ NvRmGpioReadPins(c->hGpio, &c->hSclPin, (NvRmGpioPinState *)&inout, 1);
+ if ( inout )
+ {
+ return;
+ }
+ } while ( timeout-- );
+}
+
+/* Drive SCL low by switching the pin to output mode (output latch is 0). */
+NV_INLINE static void
+I2CClockLow( NvRmI2cController *c )
+{
+ NvRmGpioConfigPins(c->hGpio, &c->hSclPin, 1, NvRmGpioPinMode_Output);
+}
+
+/* Release SDA (input mode => tristated, pulled high externally). */
+NV_INLINE static void
+I2CDataHigh( NvRmI2cController *c )
+{
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_InputData);
+}
+
+/* Drive SDA low by switching the pin to output mode (output latch is 0). */
+NV_INLINE static void
+I2CDataLow( NvRmI2cController *c )
+{
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_Output);
+}
+
+/* Release SDA and wait for it to actually read back high, polling once
+ * per clock period up to the controller timeout. As with I2CClockHigh,
+ * expiry of the timeout is not reported to the caller. */
+NV_INLINE static void
+I2CWaitDataHigh( NvRmI2cController *c )
+{
+ NvU32 timeout = c->timeout * 1000 / c->I2cClockPeriod ;
+ NvU32 inout;
+
+ do
+ {
+ WAIT_USEC( c->I2cClockPeriod );
+
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_InputData);
+ NvRmGpioReadPins(c->hGpio, &c->hSdaPin, (NvRmGpioPinState *)&inout, 1);
+ if ( inout )
+ {
+ return;
+ }
+ } while ( timeout-- );
+}
+
+/* Sample the current SDA level (the pin is first put in input mode). */
+NV_INLINE static NvU8
+I2CDataRead( NvRmI2cController *c )
+{
+ NvU32 data;
+
+ NvRmGpioConfigPins(c->hGpio, &c->hSdaPin, 1, NvRmGpioPinMode_InputData);
+ NvRmGpioReadPins(c->hGpio, &c->hSdaPin, (NvRmGpioPinState *)&data, 1);
+
+ return (NvU8)(data);
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c.c b/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c.c
new file mode 100644
index 000000000000..6cd4524d50a6
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: I2C API</b>
+ *
+ * @b Description: Contains the NvRM I2C implementation.
+ */
+
+#include "nvrm_i2c.h"
+#include "nvrm_i2c_private.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_modules.h"
+#include "nvrm_structure.h"
+#include "nvrm_pinmux_utils.h"
+
+/* Array of controllers */
+static NvRmI2cController gs_I2cControllers[MAX_I2C_INSTANCES];
+static NvRmI2cController *gs_Cont = NULL;
+
+// Maximum I2C instances present in this SOC
+static NvU32 MaxI2cControllers;
+static NvU32 MaxDvcControllers;
+static NvU32 MaxI2cInstances;
+
+static NvError PrivI2cSetSpeed(NvRmI2cController *c);
+static NvError PrivI2cConfigurePower(NvRmI2cController *c, NvBool IsEnablePower);
+
+/**
+ * Get the I2C SOC capability.
+ *
+ * Builds the per-version capability table for the requested module
+ * (I2C or DVC), lets NvRmModuleGetCapabilities() select the entry
+ * matching this instance's hardware version, and copies the selected
+ * capability into pI2cSocCaps.
+ */
+static void
+I2cGetSocCapabilities(
+ NvRmDeviceHandle hDevice,
+ NvRmModuleID ModuleId,
+ NvU32 Instance,
+ SocI2cCapability *pI2cSocCaps)
+{
+ static SocI2cCapability s_SocI2cCapsList[2];
+ // The I2c branch fills three entries (versions 1.0, 1.1 and 1.2).
+ // The array was previously declared [2], so the I2cCapsList[2]
+ // writes below overflowed the stack buffer.
+ NvRmModuleCapability I2cCapsList[3];
+ SocI2cCapability *pI2cCaps = NULL;
+
+ if (ModuleId == NvRmModuleID_I2c)
+ {
+ I2cCapsList[0].MajorVersion = 1;
+ I2cCapsList[0].MinorVersion = 0;
+ I2cCapsList[0].EcoLevel = 0;
+ I2cCapsList[0].Capability = &s_SocI2cCapsList[0];
+
+ I2cCapsList[1].MajorVersion = 1;
+ I2cCapsList[1].MinorVersion = 1;
+ I2cCapsList[1].EcoLevel = 0;
+ I2cCapsList[1].Capability = &s_SocI2cCapsList[0];
+
+ //AP15 A01P and A02 does not support packet interface
+ s_SocI2cCapsList[0].IsNewMasterAvailable = NV_FALSE;
+
+ I2cCapsList[2].MajorVersion = 1;
+ I2cCapsList[2].MinorVersion = 2;
+ I2cCapsList[2].EcoLevel = 0;
+ I2cCapsList[2].Capability = &s_SocI2cCapsList[1];
+
+ // AP20 supports Packet based interface with new master enable
+ s_SocI2cCapsList[1].IsNewMasterAvailable= NV_TRUE;
+
+ // Get the capability from modules files. All three table entries
+ // are valid on this path.
+ NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(hDevice,
+ NVRM_MODULE_ID(ModuleId, Instance), I2cCapsList,
+ 3, (void **)&pI2cCaps));
+ }
+ else if (ModuleId == NvRmModuleID_Dvc)
+ {
+ I2cCapsList[0].MajorVersion = 1;
+ I2cCapsList[0].MinorVersion = 0;
+ I2cCapsList[0].EcoLevel = 0;
+ I2cCapsList[0].Capability = &s_SocI2cCapsList[0];
+
+ // AP15 does not support new master interface
+ s_SocI2cCapsList[0].IsNewMasterAvailable= NV_FALSE;
+
+ I2cCapsList[1].MajorVersion = 1;
+ I2cCapsList[1].MinorVersion = 1;
+ I2cCapsList[1].EcoLevel = 0;
+ I2cCapsList[1].Capability = &s_SocI2cCapsList[1];
+
+ // AP20 supports new master interface and is capable of doing the
+ // packed mode transfer.
+ s_SocI2cCapsList[1].IsNewMasterAvailable= NV_TRUE;
+
+ // Get the capability from modules files. Only the first two table
+ // entries are initialized on the DVC path, so pass an explicit
+ // count of 2 rather than the array size.
+ NV_ASSERT_SUCCESS(NvRmModuleGetCapabilities(hDevice,
+ NVRM_MODULE_ID(ModuleId, Instance), I2cCapsList,
+ 2, (void **)&pI2cCaps));
+ }
+ if (pI2cCaps)
+ pI2cSocCaps->IsNewMasterAvailable= pI2cCaps->IsNewMasterAvailable;
+ else
+ NV_ASSERT(!"Invalid ModuleId is passed to I2cGetSocCapabilities() ");
+}
+
+/**
+ * Open a handle to an I2C (or DVC-I2C) controller instance.
+ *
+ * The first opener of a given instance initializes the per-controller
+ * state: queries SOC capabilities, selects the packet-mode (AP20) or
+ * normal-mode (AP15) HAL, reads the ODM pin-mux configuration, acquires
+ * GPIO pin handles when the pin map is fixed, registers a power client
+ * and resets the module. Subsequent openers only bump the reference
+ * count. The returned handle encodes the controller index with the MSB
+ * set so it is never zero.
+ */
+NvError
+NvRmI2cOpen(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 IoModule,
+ NvU32 instance,
+ NvRmI2cHandle *phI2c)
+{
+ NvError status = NvSuccess;
+ NvU32 PrefClockFreq = MAX_I2C_CLOCK_SPEED_KHZ;
+ NvU32 Index = instance;
+ NvRmModuleID ModuleID = NvRmModuleID_I2c;
+ NvRmI2cController *c;
+ NvOsMutexHandle hThreadSaftyMutex = NULL;
+ const NvU32 *pOdmConfigs;
+ NvU32 NumOdmConfigs;
+ NvU32 scl, sda;
+
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(phI2c);
+ NV_ASSERT((IoModule == NvOdmIoModule_I2c) || (IoModule == NvOdmIoModule_I2c_Pmu));
+
+ *phI2c = 0;
+ /* If none of the controllers is opened, set up the table covering all
+ * controllers in the system */
+ if (gs_Cont == NULL)
+ {
+ gs_Cont = gs_I2cControllers;
+ MaxI2cControllers = NvRmModuleGetNumInstances(hRmDevice, NvRmModuleID_I2c);
+ MaxDvcControllers = NvRmModuleGetNumInstances(hRmDevice, NvRmModuleID_Dvc);
+ MaxI2cInstances = MaxI2cControllers + MaxDvcControllers;
+ }
+ /* Validate the instance number passed and compute the Index of the
+ * controller in the table (DVC instances follow the I2C ones).
+ */
+ if (IoModule == NvOdmIoModule_I2c)
+ {
+ NV_ASSERT(instance < MaxI2cControllers);
+ ModuleID = NvRmModuleID_I2c;
+ Index = instance;
+ }
+ else if (IoModule == NvOdmIoModule_I2c_Pmu)
+ {
+ NV_ASSERT(instance < MaxDvcControllers);
+ ModuleID = NvRmModuleID_Dvc;
+ Index = MaxI2cControllers + instance;
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid IO module");
+ return NvError_NotSupported;
+ }
+
+ c = &(gs_Cont[Index]);
+
+ // Create the mutex for providing the thread safety for i2c API.
+ // The atomic exchange arbitrates racing first-openers: the loser
+ // destroys its freshly-created mutex.
+ if ((c->NumberOfClientsOpened == 0) && (c->I2cThreadSafetyMutex == NULL))
+ {
+ status = NvOsMutexCreate(&hThreadSaftyMutex);
+ if (status)
+ return status;
+
+ if (NvOsAtomicCompareExchange32((NvS32*)&c->I2cThreadSafetyMutex, 0,
+ (NvS32)hThreadSaftyMutex)!=0)
+ {
+ NvOsMutexDestroy(hThreadSaftyMutex);
+ hThreadSaftyMutex = NULL;
+ }
+ }
+
+ NvOsMutexLock(c->I2cThreadSafetyMutex);
+ // If no clients are opened yet, initialize the i2c controller
+ if (c->NumberOfClientsOpened == 0)
+ {
+ /* Populate the controller structure */
+ c->hRmDevice = hRmDevice;
+ c->OdmIoModule = IoModule;
+ c->ModuleId = ModuleID;
+ c->Instance = instance;
+
+ c->I2cPowerClientId = 0;
+ c->receive = NULL;
+ c->send = NULL;
+ c->close = NULL;
+ c->GetGpioPins = NULL;
+ c->hGpio = NULL;
+ c->hSclPin = 0;
+ c->hSdaPin = 0;
+
+
+ I2cGetSocCapabilities(hRmDevice, ModuleID, instance, &(c->SocI2cCaps));
+ c->EnableNewMaster = c->SocI2cCaps.IsNewMasterAvailable;
+
+ NvOdmQueryPinMux(IoModule, &pOdmConfigs, &NumOdmConfigs);
+ NV_ASSERT((instance < NumOdmConfigs) && (pOdmConfigs[instance]));
+ if ((instance >= NumOdmConfigs) || (!pOdmConfigs[instance]))
+ {
+ status = NvError_NotSupported;
+ goto fail_1;
+ }
+ c->PinMapConfig = pOdmConfigs[instance];
+
+ /* Call the appropriate open function according to whether the
+ * controller supports packet mode. If packet mode is supported
+ * call AP20RmI2cOpen for packet mode functions, otherwise use
+ * normal mode */
+ if (c->SocI2cCaps.IsNewMasterAvailable)
+ status = AP20RmI2cOpen(c);
+ else
+ status = AP15RmI2cOpen(c);
+
+ if (status)
+ goto fail_1;
+ /* Make sure that all the functions are populated by the HAL driver */
+ NV_ASSERT(c->receive && c->send && c->close);
+
+ status = NvRmSetModuleTristate(c->hRmDevice,
+ NVRM_MODULE_ID(c->ModuleId, c->Instance), NV_FALSE);
+ if (status != NvSuccess)
+ {
+ goto fail_1;
+ }
+
+ /* Initialize the GPIO handles */
+ if (c->GetGpioPins)
+ {
+ status = NvRmGpioOpen(c->hRmDevice, &c->hGpio);
+ if(status)
+ goto fail_1;
+ if (c->PinMapConfig != NvOdmI2cPinMap_Multiplexed)
+ {
+ if ((c->GetGpioPins)(c, c->PinMapConfig, &scl, &sda))
+ {
+ status = NvRmGpioAcquirePinHandle(c->hGpio, (scl >> 16),
+ (scl & 0xFFFF), &c->hSclPin);
+ if(!status)
+ {
+ status = NvRmGpioAcquirePinHandle(c->hGpio, (sda >> 16),
+ (sda & 0xFFFF), &c->hSdaPin);
+ if(status)
+ {
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSclPin, 1);
+ c->hSclPin = 0;
+ goto fail_1;
+ }
+ }
+ }
+ }
+ }
+ status = NvRmPowerRegister(hRmDevice, NULL, &c->I2cPowerClientId);
+ if (status != NvSuccess)
+ {
+ goto fail_2;
+ }
+
+ /* Enable power rail, enable clock, configure clock to right freq,
+ * reset, disable clock, notify to disable power rail.
+ *
+ * All of this is done to just reset the controller.
+ * */
+ PrivI2cConfigurePower(c, NV_TRUE);
+ status = NvRmPowerModuleClockConfig(hRmDevice,
+ NVRM_MODULE_ID(ModuleID, instance), c->I2cPowerClientId,
+ PrefClockFreq, NvRmFreqUnspecified, &PrefClockFreq, 1, NULL, 0);
+ if (status != NvSuccess)
+ {
+ goto fail_3;
+ }
+ NvRmModuleReset(hRmDevice, NVRM_MODULE_ID(ModuleID, instance));
+
+ PrivI2cConfigurePower(c, NV_FALSE);
+ }
+ c->NumberOfClientsOpened++;
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+
+ /*
+ * We cannot return handle with a value of 0, as some clients check the
+ * handle to be non-zero. So, to get around that we set MSB bit to 1.
+ */
+ *phI2c = (NvRmI2cHandle)(Index | 0x80000000);
+ return NvSuccess;
+
+fail_3:
+ PrivI2cConfigurePower(c, NV_FALSE);
+
+fail_2:
+ NvRmPowerUnRegister(hRmDevice, c->I2cPowerClientId);
+ c->I2cPowerClientId = 0;
+
+fail_1:
+ /* c->close is only populated by a successful AP20/AP15 open; guard
+ * against calling a NULL function pointer when we fail before (or
+ * inside) the HAL open. */
+ if (c->close)
+ (c->close)(c);
+ *phI2c = 0;
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSclPin, 1);
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSdaPin, 1);
+ NvRmGpioClose(c->hGpio);
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+ NvOsMutexDestroy(c->I2cThreadSafetyMutex);
+ NvOsMemset(c, 0, sizeof(*c));
+
+ return status;
+}
+
+/**
+ * Drop a reference on an I2C controller handle; the last closer releases
+ * GPIO pin handles, unregisters the power client, re-tristates the
+ * module pins and tears down the HAL and the thread-safety mutex.
+ */
+void NvRmI2cClose(NvRmI2cHandle hI2c)
+{
+ NvU32 Index;
+ NvRmI2cController *c;
+
+ if (hI2c == NULL)
+ return;
+
+ /* NOTE(review): the open path sets the MSB and NvRmI2cTransaction
+ * masks with 0x7FFFFFFF; masking with 0xFF here is only equivalent
+ * while instance counts stay below 256 — confirm intent. */
+ Index = ((NvU32) hI2c) & 0xFF;
+ if (Index < MaxI2cInstances)
+ {
+ c = &(gs_Cont[Index]);
+
+ NvOsMutexLock(c->I2cThreadSafetyMutex);
+ c->NumberOfClientsOpened--;
+ if (c->NumberOfClientsOpened == 0)
+ {
+
+ if(c->GetGpioPins)
+ {
+ if (c->hSclPin)
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSclPin, 1);
+ if (c->hSdaPin)
+ NvRmGpioReleasePinHandles(c->hGpio, &c->hSdaPin, 1);
+ c->hSdaPin = 0;
+ c->hSclPin = 0;
+ }
+ NvRmGpioClose(c->hGpio);
+
+ /* Unregister the power client ID */
+ NvRmPowerUnRegister(c->hRmDevice, c->I2cPowerClientId);
+ c->I2cPowerClientId = 0;
+
+ NV_ASSERT_SUCCESS( NvRmSetModuleTristate(c->hRmDevice,
+ NVRM_MODULE_ID(c->ModuleId, c->Instance), NV_TRUE ));
+
+ NV_ASSERT(c->close);
+ (c->close)(c);
+
+ /* FIXME: There is a race here. After the Mutex is unlocked someone
+ * can call NvRmI2cOpen and create the mutex, which will then be
+ * destroyed here?
+ * */
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+ NvOsMutexDestroy(c->I2cThreadSafetyMutex);
+ c->I2cThreadSafetyMutex = NULL;
+ }
+ else
+ {
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+ }
+ }
+}
+
+/* Program the module clock to match the requested I2C bus speed
+ * (c->clockfreq, in kHz). */
+static NvError PrivI2cSetSpeed(NvRmI2cController *c)
+{
+ NvError status;
+ NvRmModuleID ModuleId = NVRM_MODULE_ID(c->ModuleId, c->Instance);
+
+ // It seems like the I2C Controller has a hidden clock divider whose
+ // value is 8. So, request for clock value multiplied by 8.
+ NvU32 PrefClockFreq = c->clockfreq * 8;
+
+ status = NvRmPowerModuleClockConfig(
+ c->hRmDevice,
+ ModuleId,
+ c->I2cPowerClientId,
+ NvRmFreqUnspecified,
+ PrefClockFreq,
+ &PrefClockFreq,
+ 1,
+ NULL,
+ 0);
+ return status;
+}
+
+/* Enable (voltage rail then clock) or disable (clock then voltage rail)
+ * power for the controller. Voltage control is compiled out under OAL
+ * builds (NV_OAL). */
+static NvError PrivI2cConfigurePower(NvRmI2cController *c, NvBool IsEnablePower)
+{
+ NvError status = NvSuccess;
+ NvRmModuleID ModuleId = NVRM_MODULE_ID(c->ModuleId, c->Instance);
+
+ if (IsEnablePower == NV_TRUE)
+ {
+#if !NV_OAL
+ status = NvRmPowerVoltageControl(
+ c->hRmDevice,
+ ModuleId,
+ c->I2cPowerClientId,
+ NvRmVoltsUnspecified,
+ NvRmVoltsUnspecified,
+ NULL,
+ 0,
+ NULL);
+#endif
+ if(status == NvSuccess)
+ {
+ // Enable the clock to the i2c controller
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockControl(c->hRmDevice,
+ ModuleId,
+ c->I2cPowerClientId,
+ NV_TRUE));
+ }
+ }
+ else
+ {
+ // Disable the clock to the i2c controller
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockControl(c->hRmDevice,
+ ModuleId,
+ c->I2cPowerClientId,
+ NV_FALSE));
+
+#if !NV_OAL
+ //disable power
+ status = NvRmPowerVoltageControl(c->hRmDevice,
+ ModuleId,
+ c->I2cPowerClientId,
+ NvRmVoltsOff,
+ NvRmVoltsOff,
+ NULL,
+ 0,
+ NULL);
+#endif
+ }
+ return status;
+}
+
+/**
+ * Execute a batch of I2C transactions on the controller encoded in hI2c.
+ *
+ * Transactions are dispatched to the hardware HAL (send/receive, or
+ * repeatStart for NOSTOP-chained groups). If the caller requested the
+ * software controller, or if old (non-packet) hardware cannot satisfy
+ * the size/NOSTOP constraints, the whole batch falls back to the
+ * GPIO bit-bang path. Multiplexed pin maps are selected for the
+ * duration of the call and re-tristated afterwards. Stops at the first
+ * failing transaction and returns its status.
+ */
+NvError NvRmI2cTransaction(
+ NvRmI2cHandle hI2c,
+ NvU32 I2cPinMap,
+ NvU32 WaitTimeoutInMilliSeconds,
+ NvU32 ClockSpeedKHz,
+ NvU8 *Data,
+ NvU32 DataLength,
+ NvRmI2cTransactionInfo * Transaction,
+ NvU32 NumOfTransactions)
+{
+ NvU32 len = 0;
+ NvError status;
+ NvU32 i;
+ NvU32 BytesTransferred = 0;
+ NvBool useGpioI2c = NV_FALSE;
+ NvRmI2cController* c;
+ NvU32 Index;
+ NvU32 RSCount = 0; // repeat start count
+ NvS32 StartTransIndex = -1; // first transaction of a NOSTOP-chained group
+
+ /* Strip the MSB that NvRmI2cOpen set to make the handle non-zero. */
+ Index = ((NvU32)hI2c) & 0x7FFFFFFF;
+
+ NV_ASSERT(((NvU32)hI2c) & 0x80000000);
+ NV_ASSERT(Index < MaxI2cInstances);
+ NV_ASSERT(Transaction);
+ NV_ASSERT(Data);
+ NV_ASSERT(ClockSpeedKHz <= MAX_I2C_CLOCK_SPEED_KHZ);
+
+ c = &(gs_Cont[Index]);
+ if (c->SocI2cCaps.IsNewMasterAvailable == NV_FALSE)
+ {
+ c->timeout = WaitTimeoutInMilliSeconds;
+ }
+ else
+ {
+ c->timeout = 1000;
+ }
+ c->clockfreq = ClockSpeedKHz;
+
+ NV_ASSERT(((c->PinMapConfig == NvOdmI2cPinMap_Multiplexed) && (I2cPinMap)) ||
+ ((c->PinMapConfig != NvOdmI2cPinMap_Multiplexed) && (!I2cPinMap)));
+
+ if (NvRmIsSimulation())
+ return NvError_NotSupported;
+
+ NvOsMutexLock(c->I2cThreadSafetyMutex);
+
+ // If I2C does not support pkt format use normal mode to transfer the data
+ if (c->SocI2cCaps.IsNewMasterAvailable == NV_FALSE)
+ {
+ /* Do all the transactions using software GPIO, if one of the
+ * transactions fails to satisfy the hardware requirements. */
+ for (i=0; i< NumOfTransactions; i++)
+ {
+ if (Transaction[i].Flags & NVRM_I2C_NOSTOP)
+ {
+ /* A NOSTOP transaction needs a follow-on transaction of the
+ * same size within the hardware's NOSTOP packet limit. */
+ if ((i+1) >= NumOfTransactions)
+ {
+ useGpioI2c = NV_TRUE;
+ break;
+ }
+ else
+ {
+ if ((Transaction[i].NumBytes > NVRM_I2C_PACKETSIZE_WITH_NOSTOP) ||
+ (Transaction[i].NumBytes != Transaction[i+1].NumBytes))
+ {
+ useGpioI2c = NV_TRUE;
+ break;
+ }
+ }
+ }
+ else
+ {
+ if (Transaction[i].NumBytes > NVRM_I2C_PACKETSIZE)
+ {
+ useGpioI2c = NV_TRUE;
+ break;
+ }
+ }
+ }
+ }
+ if ((Transaction[0].Flags & NVRM_I2C_SOFTWARE_CONTROLLER) ||
+ (useGpioI2c == NV_TRUE))
+ {
+ status = NvRmGpioI2cTransaction(c, I2cPinMap, Data, DataLength,
+ Transaction, NumOfTransactions);
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+ return status;
+ }
+
+ if (I2cPinMap)
+ {
+ NvRmPinMuxConfigSelect(c->hRmDevice, c->OdmIoModule,
+ c->Instance, I2cPinMap);
+
+ NvRmPinMuxConfigSetTristate(c->hRmDevice, c->OdmIoModule,
+ c->Instance, I2cPinMap, NV_FALSE);
+ }
+
+
+ status = PrivI2cConfigurePower(c, NV_TRUE);
+ if (status != NvSuccess)
+ goto TransactionExit;
+
+ status = PrivI2cSetSpeed(c);
+ if (status != NvSuccess)
+ goto TransactionExit;
+
+ len = 0;
+ StartTransIndex = -1;
+ for (i = 0; i < NumOfTransactions; i++)
+ {
+ c->Is10BitAddress = Transaction[i].Is10BitAddress;
+ c->NoACK = NV_FALSE;
+ if (Transaction[i].Flags & NVRM_I2C_NOACK)
+ {
+ c->NoACK = NV_TRUE;
+ }
+ // Check whether this transaction is part of a repeat-start group
+ if (!(Transaction[i].Flags & NVRM_I2C_NOSTOP) && (!RSCount))
+ {
+ if (Transaction[i].Flags & NVRM_I2C_WRITE)
+ {
+ // i2c send transaction
+ status = (c->send)(
+ c,
+ Data,
+ &Transaction[i],
+ &BytesTransferred);
+ }
+ else
+ {
+ // i2c receive transaction
+ status = (c->receive)(
+ c,
+ Data,
+ &Transaction[i],
+ &BytesTransferred);
+ }
+ Data += Transaction[i].NumBytes;
+ }
+ else
+ {
+ RSCount++;
+ // If this transaction is a repeat start, accumulate it until the
+ // first transaction without NOSTOP terminates the group.
+ if (Transaction[i].Flags & NVRM_I2C_NOSTOP)
+ {
+ len += Transaction[i].NumBytes;
+ if (StartTransIndex == -1)
+ StartTransIndex = i;
+ }
+ else
+ {
+ // i2c transaction with repeat-start: hand the whole group,
+ // starting at StartTransIndex, to the HAL in one call.
+ status = (c->repeatStart)(c, Data, &(Transaction[StartTransIndex]), RSCount);
+ Data += len + Transaction[i].NumBytes;
+ RSCount = 0;
+ len = 0;
+ StartTransIndex = -1;
+ }
+ }
+ if (status != NvSuccess)
+ {
+ break;
+ }
+ }
+TransactionExit:
+ PrivI2cConfigurePower(c, NV_FALSE);
+
+ // Re-tristate multi-plexed controllers, and re-multiplex the controller.
+ if (I2cPinMap)
+ {
+ NvRmPinMuxConfigSetTristate(c->hRmDevice, c->OdmIoModule,
+ c->Instance, I2cPinMap, NV_TRUE);
+
+ NvRmPinMuxConfigSelect(c->hRmDevice, c->OdmIoModule,
+ c->Instance, c->PinMapConfig);
+ }
+
+ NvOsMutexUnlock(c->I2cThreadSafetyMutex);
+ return status;
+}
diff --git a/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c_private.h b/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c_private.h
new file mode 100644
index 000000000000..a5836bd1ab69
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/nvrm_i2c_private.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2007-2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ *
+ * @b Description: Contains the i2c declarations.
+ */
+
+#ifndef INCLUDED_NVRM_I2C_PRIVATE_H
+#define INCLUDED_NVRM_I2C_PRIVATE_H
+
+#include "nvrm_module.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_gpio.h"
+#include "nvrm_i2c.h"
+#include "nvrm_memmgr.h"
+#include "nvrm_dma.h"
+#include "nvrm_priv_ap_general.h"
+
+
+#define MAX_I2C_CLOCK_SPEED_KHZ 400
+
+// Maximum number of i2c instances including i2c and dvc and a dummy instance
+#define MAX_I2C_INSTANCES ((MAX_I2C_CONTROLLERS) + (MAX_DVC_CONTROLLERS) + 1)
+
+/* Delay used while polling (in polling mode) for the transaction to complete */
+#define I2C_DELAY_USEC 10000
+
+/* Direction/kind of the in-flight transfer, as tracked per controller in
+ * NvRmI2cControllerRec::TransactionType. */
+typedef enum
+{
+ // Specifies a read transaction.
+ I2C_READ,
+ // Specifies a write transaction.
+ I2C_WRITE,
+ // Specifies a read transaction using i2c repeat start
+ I2C_REPEAT_START_TRANSACTION
+} I2cTransactionType;
+
+/**
+ * SOC I2C capability structure.
+ */
+typedef struct SocI2cCapabilityRec
+{
+ // NV_TRUE when the "new master" i2c engine is available on this SOC
+ // (see NvRmI2cControllerRec::EnableNewMaster).
+ NvBool IsNewMasterAvailable;
+} SocI2cCapability;
+
+/* Controller status bits. Values are distinct powers of two and may be
+ * OR-ed together into NvRmI2cControllerRec::ControllerStatus. */
+typedef enum
+{
+ /** No Error */
+ I2cControllerStatus_None = 0,
+ /** Receive Fifo Data Request */
+ I2cControllerStatus_RFifo_data_Request = 0x1,
+ /** Transmit Fifo Data Request */
+ I2cControllerStatus_TFifo_data_Request = 0x2,
+ /** Arbitration Lost */
+ I2cControllerStatus_Arb_Lost = 0x4,
+ /** No Acknowledge error */
+ I2cControllerStatus_NoAck = 0x8,
+ /** Receive Fifo Underflow */
+ I2cControllerStatus_RFifo_UFlow = 0x10,
+ /** Transmit Fifo Overflow */
+ I2cControllerStatus_TFifo_OFlow = 0x20,
+ /** All Packets Transfer Complete */
+ I2cControllerStatus_All_Packets_Xfer_Complete = 0x40,
+ /** Packet Transfer Complete */
+ I2cControllerStatus_Packet_Xfer_Complete = 0x80,
+ /** Force to 32 bit */
+ I2cControllerStatus_Force32 = 0x7FFFFFFF
+}I2cControllerStatus;
+
+struct NvRmI2cControllerRec;
+
+/* I2C controller state. There will be one instance of this structure for each
+ * I2C controller instance */
+typedef struct NvRmI2cControllerRec
+{
+ /* Controller static Information */
+
+ /* Rm device handle */
+ NvRmDeviceHandle hRmDevice;
+ /* Status of the most recent i2c transfer */
+ NvError I2cTransferStatus;
+ /* Number of clients currently holding this controller open */
+ NvU32 NumberOfClientsOpened;
+ /* Semaphore id used to block the synchronous i2c client calls */
+ NvOsSemaphoreHandle I2cSyncSemaphore;
+ /* Mutex providing thread safety for the public i2c API */
+ NvOsMutexHandle I2cThreadSafetyMutex;
+ /* Power client ID (from NvRmPowerRegister) */
+ NvU32 I2cPowerClientId;
+ /* Controller module ID. I2C is supported via the DVC module and the I2C
+ * module. */
+ NvRmModuleID ModuleId;
+
+ // Odm io module name
+ NvOdmIoModule OdmIoModule;
+
+
+ /* Instance of the above specified module */
+ NvU32 Instance;
+ // I2C interrupt handle for this controller instance
+ NvOsInterruptHandle I2CInterruptHandle;
+
+ // I2c pin mux configuration
+ NvU32 PinMapConfig;
+
+ /* GPIO pin handles for SCL and SDA lines. Used by the GPIO fallback mode */
+ NvRmGpioPinHandle hSclPin;
+ NvRmGpioPinHandle hSdaPin;
+ NvRmGpioHandle hGpio;
+
+ /* Controller run time state. These members will be populated before the HAL
+ * functions are called. HAL functions should only read these members and
+ * should not clobber these registers. */
+
+ /* I2c clock frequency */
+ NvU32 clockfreq;
+ /* Slave device address type (NV_TRUE for 10-bit addressing) */
+ NvBool Is10BitAddress;
+ /* Indicates that the slave will not generate the ACK */
+ NvBool NoACK;
+ /* Timeout for the transfer */
+ NvU32 timeout;
+
+ /* Receive data */
+ NvError (*receive)(struct NvRmI2cControllerRec *c, NvU8 * pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction, NvU32 * pBytesTransferred);
+
+ /* Send data */
+ NvError (*send)(struct NvRmI2cControllerRec *c, NvU8 * pBuffer,
+ const NvRmI2cTransactionInfo *pTransaction, NvU32 * pBytesTransferred);
+
+ /* Repeat start - this is specific to the AP15 and will not be called for
+ * later chips */
+ NvError (*repeatStart)(struct NvRmI2cControllerRec *c, NvU8 * pBuffer,
+ NvRmI2cTransactionInfo *Transactions, NvU32 NoOfTransations);
+
+ /* Return the GPIO pin and port numbers of the SDA and SCL lines for that
+ * controller. */
+ NvBool (*GetGpioPins)(struct NvRmI2cControllerRec *c,
+ NvU32 PinMap, NvU32 *Scl, NvU32 *Sda);
+
+ /* Shutdown the controller */
+ void (*close)(struct NvRmI2cControllerRec *c);
+
+ /* AP15 controller specific state */
+
+ /* Flag to know whether it is a read or a write transaction */
+ I2cTransactionType TransactionType;
+ /* Though all the controllers have the same register spec, their start
+ * addresses don't match. The DVC controller's I2C register start address
+ * differs from the I2C controller's. */
+ NvU32 I2cRegisterOffset;
+
+ // I2C capability for this SOC only.
+ SocI2cCapability SocI2cCaps;
+
+ /* Flag set by the ISR for a TX FIFO data request */
+ volatile NvBool IntForTFIFOReq;
+ /* Flag set by the ISR for an RX FIFO data request */
+ volatile NvBool IntForRFIFOReq;
+ /* Flag to hold "all packets complete" */
+ volatile NvBool AllPktComplete;
+ /* Flag to hold "packet complete" */
+ volatile NvBool PktXferComplte;
+ /* Repeat start transfer in progress */
+ volatile NvBool RsTransfer;
+ /* Holds the number of repeat start transactions */
+ volatile NvU32 NoOfRSTransactions;
+ /* Pointer to the transfer buffer */
+ NvU8 * pTransferBuffer;
+ /* Repeat start transactions */
+ NvRmI2cTransactionInfo * RSTransactions;
+ /* Index of the packet currently being read */
+ volatile NvU32 CurrentReadPktId;
+ /* Bytes already consumed from the current read */
+ volatile NvU32 BytesAlreadyRead;
+ /* Clock period in micro-seconds */
+ NvU32 I2cClockPeriod;
+ /* I2c controller status variable */
+ I2cControllerStatus ControllerStatus;
+ /* Indicates whether to enable the new master or not */
+ NvBool EnableNewMaster;
+
+ NvU32 *pDataBuffer;
+
+ // Number of words transferred so far
+ NvU32 WordTransferred;
+
+ // Remaining words to be transferred.
+ NvU32 WordRemaining;
+
+ // Final interrupt condition after which the transaction completes.
+ NvU32 FinalInterrupt;
+
+ // Content of the interrupt mask register.
+ NvU32 IntMaskReg;
+
+ // Controller Id for packet mode information.
+ NvU32 ControllerId;
+
+ // Tells whether the current transfer is with NO STOP
+ NvBool IsCurrentTransferNoStop;
+
+ // Tells whether the current transfer is with ack or not
+ NvBool IsCurrentTransferNoAck;
+
+ // Tells whether current transfer is a read or write type.
+ NvBool IsCurrentTransferRead;
+
+ // Tells whether transfer is completed or not
+ NvBool IsTransferCompleted;
+
+ // Apb dma related information
+ // Tells whether the dma mode is supported or not.
+ NvBool IsApbDmaAllocated;
+
+ // Dma handle for the read/write.
+ NvRmDmaHandle hRmDma;
+
+ // Memory handle to create the uncached memory.
+ NvRmMemHandle hRmMemory;
+
+ // Dma buffer physical address.
+ NvRmPhysAddr DmaBuffPhysAdd;
+
+ // Virtual pointer to the dma buffer.
+ NvU32 *pDmaBuffer;
+
+ // Current Dma transfer size for the Rx and tx
+ NvU32 DmaBufferSize;
+
+ // Dma request for read transaction
+ NvRmDmaClientBuffer RxDmaReq;
+
+ // Dma request for write transaction
+ NvRmDmaClientBuffer TxDmaReq;
+
+ // Tells whether it is using the apb dma for the transfer or not.
+ NvBool IsUsingApbDma;
+
+ // Buffer used when the cpu (PIO path) does the data receiving.
+ NvU32 *pCpuBuffer;
+
+ // Count of transactions since the dma was last used
+ NvU32 TransCountFromLastDmaUsage;
+
+} NvRmI2cController, *NvRmI2cControllerHandle;
+
+/* Perform an i2c transaction over the GPIO pins (fallback path using the
+ * hSclPin/hSdaPin handles in NvRmI2cController). Parameters mirror
+ * NvRmI2cTransaction: Transaction is an array of NumOfTransactions
+ * descriptors and Data holds the concatenated payloads. */
+NvError NvRmGpioI2cTransaction(
+ NvRmI2cController *c,
+ NvU32 I2cPinMap,
+ NvU8 *Data,
+ NvU32 DataLength,
+ NvRmI2cTransactionInfo * Transaction,
+ NvU32 NumOfTransactions);
+
+
+/**
+ * @brief Initialize the AP15 I2C controller HAL to start the data transfer.
+ *
+ * This API should always return NvSuccess.
+ *
+ * @param c I2C controller structure.
+ * */
+NvError AP15RmI2cOpen(NvRmI2cController *c);
+
+/**
+ * @brief Initialize the AP20 I2C controller HAL to start the data transfer.
+ *
+ * This API should always return NvSuccess.
+ *
+ * @param c I2C controller structure.
+ * */
+NvError AP20RmI2cOpen(NvRmI2cController *c);
+
+#endif // INCLUDED_NVRM_I2C_PRIVATE_H
+
+
diff --git a/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr.c b/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr.c
new file mode 100644
index 000000000000..bc1b7f7e2126
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ * @brief <b>NVIDIA Driver Development Kit: OWR API</b>
+ *
+ * @b Description: Contains the NvRM OWR implementation.
+ */
+
+#include "nvrm_owr.h"
+#include "nvrm_drf.h"
+#include "nvos.h"
+#include "nvrm_module.h"
+#include "nvrm_hardware_access.h"
+#include "nvrm_power.h"
+#include "nvrm_interrupt.h"
+#include "nvassert.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_pinmux.h"
+#include "nvrm_chiplib.h"
+#include "nvrm_hwintf.h"
+#include "nvodm_modules.h"
+#include "nvrm_structure.h"
+#include "nvrm_pinmux_utils.h"
+#include "nvrm_owr_private.h"
+
+// Mask to get the instance from the OWR handle
+// LSB byte of the OWR handle stores the OWR instance.
+#define OWR_HANDLE_INSTANCE_MASK 0xFF
+
+// MSB bit of the OWR handle. MSB bit of the OWR
+// handle is always set to 1 to make sure OWR handle is never NULL.
+#define OWR_HANDLE_MSB_BIT 0x80000000
+
+/* Array of controllers */
+static NvRmOwrController gs_OwrControllers[MAX_OWR_INSTANCES];
+static NvRmOwrController *gs_OwrCont = NULL;
+
+// Maximum OWR Instances present in this SOC
+static NvU32 MaxOwrInstances;
+
+
+static void
+PrivOwrConfigurePower(
+ NvRmOwrController *pOwrInfo,
+ NvBool IsEnablePower);
+
+static NvError
+PrivOwrGetCaps(
+ NvRmDeviceHandle hDevice,
+ NvU32 Instance,
+ NvRmOwrCapability** pOwrSocCaps);
+
+
+/* Bring the OWR controller's power rail and module clock up or down.
+ * Enable: rail first, then clock. Disable: clock first, then rail. */
+static void
+PrivOwrConfigurePower(
+ NvRmOwrController *pOwrInfo,
+ NvBool IsEnablePower)
+{
+ NvRmModuleID OwrModule =
+ NVRM_MODULE_ID(pOwrInfo->ModuleId, pOwrInfo->Instance);
+
+ if (IsEnablePower == NV_TRUE)
+ {
+ /* Power up the rail before ungating the controller clock. */
+ NV_ASSERT_SUCCESS(NvRmPowerVoltageControl(
+ pOwrInfo->hRmDevice, OwrModule, pOwrInfo->OwrPowerClientId,
+ NvRmVoltsUnspecified, NvRmVoltsUnspecified, NULL, 0, NULL));
+
+ // Enable the clock to the OWR controller
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockControl(
+ pOwrInfo->hRmDevice, OwrModule, pOwrInfo->OwrPowerClientId,
+ NV_TRUE));
+ }
+ else
+ {
+ // Gate the clock to the OWR controller first...
+ NV_ASSERT_SUCCESS(NvRmPowerModuleClockControl(
+ pOwrInfo->hRmDevice, OwrModule, pOwrInfo->OwrPowerClientId,
+ NV_FALSE));
+
+ // ...then drop the power rail.
+ NV_ASSERT_SUCCESS(NvRmPowerVoltageControl(
+ pOwrInfo->hRmDevice, OwrModule, pOwrInfo->OwrPowerClientId,
+ NvRmVoltsOff, NvRmVoltsOff, NULL, 0, NULL));
+ }
+}
+
+/* Look up the OWR capability record for this chip via the module manager.
+ * On success, *pOwrSocCaps points at the matching static capability entry. */
+static NvError
+PrivOwrGetCaps(
+ NvRmDeviceHandle hDevice,
+ NvU32 Instance,
+ NvRmOwrCapability** pOwrSocCaps)
+{
+ static NvRmOwrCapability s_OwrCap0;
+ static NvRmModuleCapability s_OwrCaps[] =
+ { {1, 0, 0, &s_OwrCap0 }
+ };
+ NvRmOwrCapability *pMatch = NULL;
+ NvError err;
+
+ s_OwrCap0.NoOfInstances = 1;
+
+ /* Ask the module manager which capability entry matches this SOC. */
+ err = NvRmModuleGetCapabilities(hDevice,
+ NVRM_MODULE_ID(NvRmModuleID_OneWire, Instance),
+ s_OwrCaps,
+ NV_ARRAY_SIZE(s_OwrCaps), (void **)&pMatch);
+ if (err == NvSuccess)
+ {
+ *pOwrSocCaps = pMatch;
+ }
+ return err;
+}
+
+/**
+ * Opens an OWR controller instance and returns an opaque handle in *phOwr.
+ *
+ * On the first open of an instance the controller is fully initialized:
+ * the AP20 HAL is attached, pins are un-tristated, a power client is
+ * registered, and the module is clock-configured and reset. Subsequent
+ * opens only increment the client refcount.
+ *
+ * The handle encodes the instance number in its low byte with the MSB
+ * forced to 1, so a valid handle is never NULL.
+ */
+NvError
+NvRmOwrOpen(
+ NvRmDeviceHandle hRmDevice,
+ NvU32 Instance,
+ NvRmOwrHandle *phOwr)
+{
+ NvError status = NvSuccess;
+ NvRmOwrController *pOwrInfo;
+ NvU32 PrefClockFreq = MAX_OWR_CLOCK_SPEED_KHZ;
+ NvRmModuleID ModuleId;
+ NvOdmIoModule IoModule = NvOdmIoModule_OneWire;
+ NvRmOwrCapability *OwrSocCaps = NULL;
+
+ NV_ASSERT(hRmDevice);
+ NV_ASSERT(phOwr);
+
+ /** If no controller has been opened yet, point at the statically
+ * allocated controller array.
+ */
+ if (gs_OwrCont == NULL)
+ {
+ gs_OwrCont = gs_OwrControllers;
+ }
+
+ /* Validate the Instance number passed */
+ if (IoModule == NvOdmIoModule_OneWire)
+ {
+ MaxOwrInstances =
+ NvRmModuleGetNumInstances(hRmDevice, NvRmModuleID_OneWire);
+ NV_ASSERT(Instance < MaxOwrInstances);
+ ModuleId = NvRmModuleID_OneWire;
+ }
+ else
+ {
+ NV_ASSERT(!"Invalid IO module");
+ }
+
+ /* The capability query only verifies the OWR module exists on this chip;
+ * OwrSocCaps itself is not consumed further here. */
+ status = PrivOwrGetCaps(hRmDevice, Instance, &OwrSocCaps);
+ if (status != NvSuccess)
+ {
+ return NvError_NotInitialized;
+ }
+
+ pOwrInfo = &(gs_OwrCont[Instance]);
+
+ // Create the mutex providing thread safety for the OWR API.
+ /* NOTE(review): this check-then-create is racy if two threads perform the
+ * first open of the same instance concurrently - see the matching FIXME
+ * in NvRmOwrClose. */
+ if (pOwrInfo->NumberOfClientsOpened == 0)
+ {
+ status = NvOsMutexCreate(&(pOwrInfo->OwrThreadSafetyMutex));
+ if (status != NvSuccess)
+ {
+ pOwrInfo->OwrThreadSafetyMutex = NULL;
+ return status;
+ }
+ }
+
+ NvOsMutexLock(pOwrInfo->OwrThreadSafetyMutex);
+ // If no clients are opened yet, initialize the OWR controller
+ if (pOwrInfo->NumberOfClientsOpened == 0)
+ {
+ /* Populate the controller structure */
+ pOwrInfo->hRmDevice = hRmDevice;
+ pOwrInfo->ModuleId = ModuleId;
+ pOwrInfo->Instance = Instance;
+ pOwrInfo->OwrPowerClientId = 0;
+
+ NV_ASSERT_SUCCESS(AP20RmOwrOpen(pOwrInfo));
+
+ /* Make sure that all the functions are populated by the HAL driver */
+ NV_ASSERT(pOwrInfo->read && pOwrInfo->write && pOwrInfo->close);
+
+ status = NvRmSetModuleTristate(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(pOwrInfo->ModuleId, pOwrInfo->Instance), NV_FALSE);
+ if (status != NvSuccess)
+ {
+ goto fail;
+ }
+
+ status =
+ NvRmPowerRegister(hRmDevice, NULL, &pOwrInfo->OwrPowerClientId);
+ if (status != NvSuccess)
+ {
+ goto fail;
+ }
+
+ /** Enable power rail, enable clock, configure clock to right freq,
+ * reset, disable clock, notify to disable power rail.
+ *
+ * All of this is done just to reset the controller.
+ */
+ PrivOwrConfigurePower(pOwrInfo, NV_TRUE);
+ status = NvRmPowerModuleClockConfig(hRmDevice,
+ NVRM_MODULE_ID(ModuleId, Instance),
+ pOwrInfo->OwrPowerClientId,
+ PrefClockFreq,
+ NvRmFreqUnspecified,
+ &PrefClockFreq,
+ 1,
+ NULL,
+ 0);
+ if (status != NvSuccess)
+ {
+ PrivOwrConfigurePower(pOwrInfo, NV_FALSE);
+ goto fail;
+ }
+ NvRmModuleReset(hRmDevice, NVRM_MODULE_ID(ModuleId, Instance));
+ PrivOwrConfigurePower(pOwrInfo, NV_FALSE);
+ }
+ pOwrInfo->NumberOfClientsOpened++;
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+
+ /** We cannot return a handle with a value of 0, as some clients check the
+ * handle to be non-zero. So, to get around that, we set the MSB bit to 1.
+ */
+ *phOwr = (NvRmOwrHandle)(Instance | OWR_HANDLE_MSB_BIT);
+ return NvSuccess;
+
+fail:
+ if (pOwrInfo->OwrPowerClientId)
+ {
+ NvRmPowerUnRegister(hRmDevice, pOwrInfo->OwrPowerClientId);
+ pOwrInfo->OwrPowerClientId = 0;
+ }
+
+ (pOwrInfo->close)(pOwrInfo);
+ *phOwr = 0;
+
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+ NvOsMutexDestroy(pOwrInfo->OwrThreadSafetyMutex);
+ /* Clear the stale handle: NumberOfClientsOpened is still 0, so without
+ * this a later open would skip mutex creation and lock the destroyed
+ * mutex. */
+ pOwrInfo->OwrThreadSafetyMutex = NULL;
+
+ return status;
+}
+
+/**
+ * Releases one client reference on the OWR controller encoded in hOwr.
+ * When the last client closes, the power client is unregistered, the pins
+ * are re-tristated, and the HAL is shut down. NULL and never-opened
+ * handles are ignored.
+ */
+void NvRmOwrClose(NvRmOwrHandle hOwr)
+{
+ NvU32 Index;
+ NvRmOwrController *pOwrInfo;
+
+ if (hOwr == NULL)
+ return;
+
+ /* Low byte of the handle encodes the instance (see NvRmOwrOpen). */
+ Index = ((NvU32) hOwr) & OWR_HANDLE_INSTANCE_MASK;
+ if ((Index < MaxOwrInstances) && (gs_OwrCont != NULL))
+ {
+ pOwrInfo = &(gs_OwrCont[Index]);
+
+ /* Instance was never opened (or is already fully closed); locking a
+ * NULL mutex would be invalid. */
+ if (pOwrInfo->OwrThreadSafetyMutex == NULL)
+ return;
+
+ NvOsMutexLock(pOwrInfo->OwrThreadSafetyMutex);
+ /* Guard against unbalanced close calls underflowing the refcount. */
+ if (pOwrInfo->NumberOfClientsOpened == 0)
+ {
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+ return;
+ }
+ pOwrInfo->NumberOfClientsOpened--;
+ if (pOwrInfo->NumberOfClientsOpened == 0)
+ {
+ /* Unregister the power client ID */
+ NvRmPowerUnRegister(pOwrInfo->hRmDevice,
+ pOwrInfo->OwrPowerClientId);
+ pOwrInfo->OwrPowerClientId = 0;
+
+ NV_ASSERT_SUCCESS( NvRmSetModuleTristate(pOwrInfo->hRmDevice,
+ NVRM_MODULE_ID(pOwrInfo->ModuleId, pOwrInfo->Instance), NV_TRUE ));
+
+ NV_ASSERT(pOwrInfo->close);
+ (pOwrInfo->close)(pOwrInfo);
+
+ /** FIXME: There is a race here. After the Mutex is unlocked someone
+ * can call NvRmOwrOpen and create the mutex, which will then be
+ * destroyed here.
+ */
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+ NvOsMutexDestroy(pOwrInfo->OwrThreadSafetyMutex);
+ pOwrInfo->OwrThreadSafetyMutex = NULL;
+ }
+ else
+ {
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+ }
+ }
+}
+
+/**
+ * Runs a sequence of OWR read/write transactions.
+ *
+ * Data holds the concatenated payload for all transactions; each
+ * Transaction[i] consumes/produces Transaction[i].NumBytes of it. Stops at
+ * the first failing transaction and returns its status.
+ *
+ * NOTE(review): OwrPinMap and DataLength are currently unused - confirm
+ * whether pin-map selection (as done in the i2c path) is intentionally
+ * omitted here.
+ */
+NvError NvRmOwrTransaction(
+ NvRmOwrHandle hOwr,
+ NvU32 OwrPinMap,
+ NvU8 *Data,
+ NvU32 DataLength,
+ NvRmOwrTransactionInfo * Transaction,
+ NvU32 NumOfTransactions)
+{
+ NvU32 i;
+ NvRmOwrController* pOwrInfo;
+ NvU32 Index;
+ NvError status = NvSuccess;
+
+ /* Low byte of the handle encodes the instance (see NvRmOwrOpen). */
+ Index = ((NvU32)hOwr) & OWR_HANDLE_INSTANCE_MASK;
+
+ NV_ASSERT(Index < MaxOwrInstances);
+ NV_ASSERT(Transaction);
+ NV_ASSERT(Data);
+
+ /* NV_ASSERT is compiled out in release builds; validate explicitly so a
+ * bad or stale handle cannot index past gs_OwrControllers or dereference
+ * a NULL controller array. */
+ if ((hOwr == NULL) || (gs_OwrCont == NULL) ||
+ (Index >= MaxOwrInstances) ||
+ (Transaction == NULL) || (Data == NULL))
+ {
+ return NvError_BadParameter;
+ }
+
+ pOwrInfo = &(gs_OwrCont[Index]);
+
+ NvOsMutexLock(pOwrInfo->OwrThreadSafetyMutex);
+
+ PrivOwrConfigurePower(pOwrInfo, NV_TRUE);
+
+ for (i = 0; i < NumOfTransactions; i++)
+ {
+ if (Transaction[i].Flags == NvRmOwr_MemWrite)
+ {
+ // OWR write transaction
+ status = (pOwrInfo->write)(
+ pOwrInfo,
+ Data,
+ Transaction[i]);
+ }
+ else
+ {
+ // OWR read transaction
+ status = (pOwrInfo->read)(
+ pOwrInfo,
+ Data,
+ Transaction[i]);
+ }
+ Data += Transaction[i].NumBytes;
+ if (status != NvSuccess)
+ {
+ break;
+ }
+ }
+
+ PrivOwrConfigurePower(pOwrInfo, NV_FALSE);
+
+ NvOsMutexUnlock(pOwrInfo->OwrThreadSafetyMutex);
+ return status;
+}
+
diff --git a/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr_private.h b/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr_private.h
new file mode 100644
index 000000000000..efeb0efb2144
--- /dev/null
+++ b/arch/arm/mach-tegra/nvrm/io/common/nvrm_owr_private.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2009 NVIDIA Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/** @file
+ *
+ * @b Description: Contains the OWR declarations.
+ */
+
+#ifndef INCLUDED_NVRM_OWR_PRIVATE_H
+#define INCLUDED_NVRM_OWR_PRIVATE_H
+
+#include "nvrm_module.h"
+#include "nvodm_query_pinmux.h"
+#include "nvrm_gpio.h"
+#include "nvrm_owr.h"
+
+/* Maximum number of OWR controller instances supported */
+#define MAX_OWR_INSTANCES 1
+
+#define MAX_OWR_CLOCK_SPEED_KHZ 1000
+
+#define OWR_NO_OF_BITS_PER_BYTE 8
+
+// ROM id size
+#define OWR_ROM_ID_SIZE_BYTES 8
+
+/* OWR ROM commands */
+#define OWR_ROM_READ_COMMAND 0x33
+#define OWR_ROM_SKIP_COMMAND 0xCC
+
+/* OWR MEM commands */
+#define OWR_MEM_READ_COMMAND 0xF0
+#define OWR_MEM_WRITE_COMMAND 0x0F
+
+/**
+ * @brief OWR interrupt status bits
+ *
+ * NOTE(review): the enumerators are named "...IntEnable" while the enum is
+ * described as status bits; presumably the same bit positions are shared by
+ * the interrupt-enable and interrupt-status registers - confirm against the
+ * OWR register spec.
+ */
+typedef enum
+{
+ // Presence Error Interrupt enable
+ OwrIntrStatus_PresenceErrIntEnable = 0x1,
+
+ // CRC Error Interrupt enable
+ OwrIntrStatus_CrcErrIntEnable = 0x2,
+
+ // Mem write Error Interrupt enable
+ OwrIntrStatus_MemWriteErrIntEnable = 0x4,
+
+ // Error Command Interrupt enable
+ OwrIntrStatus_ErrCommandIntEnable = 0x8,
+
+ // Reset done Interrupt enable
+ OwrIntrStatus_RstDoneIntEnable = 0x10,
+
+ // Presence done Interrupt enable
+ OwrIntrStatus_PresenceDoneIntEnable = 0x20,
+
+ // ROM Command done Interrupt enable
+ OwrIntrStatus_RomCmdDoneIntEnable = 0x40,
+
+ // MEM Command done Interrupt enable
+ OwrIntrStatus_MemCmdDoneIntEnable = 0x80,
+
+ // TXF overflow Interrupt enable
+ OwrIntrStatus_TxfOvfIntEnable = 0x100,
+
+ // RXF underrun Interrupt enable
+ OwrIntrStatus_RxfUnrIntEnable = 0x200,
+
+ // Dglitch Interrupt enable
+ OwrIntrStatus_DglitchIntEnable = 0x400,
+
+ // TX FIFO Data Request Interrupt enable
+ OwrIntrStatus_TxFifoDataReqIntEnable = 0x800,
+
+ // RX FIFO Data Request Interrupt enable
+ OwrIntrStatus_RxFifoDataReqIntEnable = 0x1000,
+
+ // Bit transfer done Interrupt enable
+ OwrIntrStatus_BitTransferDoneIntEnable = 0x2000,
+
+ /** Force to 32 bit */
+ OwrIntrStatus_Force32 = 0x7FFFFFFF
+} OwrIntrStatus;
+
+/**
+ * @brief OWR controller status bits
+ *
+ * NOTE(review): every enumerator below is defined as 0, so these values
+ * cannot distinguish individual status bits; they appear to be unfilled
+ * placeholders. Confirm the real bit positions against the OWR register
+ * spec before relying on this enum.
+ */
+typedef enum
+{
+ // Ready bit
+ OwrStatus_Rdy = 0x0,
+
+ // Tx FIFO Full
+ OwrStatus_TxfFull = 0,
+
+ // Tx FIFO Empty
+ OwrStatus_TxfEmpty = 0,
+
+ // RTx FIFO Full
+ OwrStatus_RxfFull = 0,
+
+ // Rx FIFO Empty
+ OwrStatus_RxfEmpty = 0,
+
+ // Tx Flush
+ OwrStatus_TxfFlush = 0,
+
+ // Rx Flush
+ OwrStatus_RxfFlush = 0,
+
+ // Rx Fifo Full Count
+ OwrStatus_RxFifoFullCnt = 0,
+
+ // Tx Fifo empty Count
+ OwrStatus_TxFifoEmptyCnt = 0,
+
+ // Reset
+ OwrStatus_Rpp = 0,
+
+ // Write bit 0
+ OwrStatus_Write0 = 0,
+
+ // Write bit 1
+ OwrStatus_Write1 = 0,
+
+ // Read bit
+ OwrStatus_Read = 0,
+
+ // Read Sampled bit
+ OwrStatus_ReadSampledBit = 0,
+
+ /** Force to 32 bit */
+ OwrStatus_Force32 = 0x7FFFFFFF
+} OwrStatus;
+
+/* Direction of an OWR transaction as tracked by the HAL. */
+typedef enum
+{
+ // Specifies a read transaction.
+ OWR_READ,
+ // Specifies a write transaction.
+ OWR_WRITE,
+} OwrTransactionType;
+
+struct NvRmOwrControllerRec;
+
+/* OWR controller state. There will be one instance of this structure for each
+ * OWR controller instance */
+typedef struct NvRmOwrControllerRec
+{
+ /* Controller static Information */
+
+ /* Rm device handle */
+ NvRmDeviceHandle hRmDevice;
+
+ /* Status of the most recent OWR transfer */
+ NvU32 OwrTransferStatus;
+
+ /* Number of clients currently holding this controller open */
+ NvU32 NumberOfClientsOpened;
+
+ /* Semaphore id used to block the synchronous owr client calls */
+ NvOsSemaphoreHandle OwrSyncSemaphore;
+
+ /* Mutex providing thread safety for the public OWR API */
+ NvOsMutexHandle OwrThreadSafetyMutex;
+
+ /* Power client ID (from NvRmPowerRegister) */
+ NvU32 OwrPowerClientId;
+
+ /* Controller module ID. */
+ NvRmModuleID ModuleId;
+
+ /* Instance of the above specified module */
+ NvU32 Instance;
+
+ // OWR interrupt handle for this controller instance
+ NvOsInterruptHandle OwrInterruptHandle;
+
+ /* Read data */
+ NvError (*read)(struct NvRmOwrControllerRec *c, NvU8 * pBuffer,
+ NvRmOwrTransactionInfo Transaction);
+
+ /* Send data */
+ NvError (*write)(struct NvRmOwrControllerRec *c, NvU8 * pBuffer,
+ NvRmOwrTransactionInfo Transaction);
+
+ /* Shutdown the controller */
+ void (*close)(struct NvRmOwrControllerRec *c);
+
+ // Controller register aperture: mapped virtual pointer, size, and
+ // physical base address.
+ NvU32* pOwrVirtualAddress;
+ NvU32 OwrBankSize;
+ NvRmPhysAddr OwrPhysicalAddress;
+} NvRmOwrController;
+
+/** OWR SOC capability structure, as returned through
+ * NvRmModuleGetCapabilities for the OneWire module. */
+typedef struct SocOwrCapabilityRec
+{
+ // Number of OWR controller instances on this SOC
+ NvU32 NoOfInstances;
+} NvRmOwrCapability;
+
+
+/**
+ * @brief Initialize the AP20 OWR controller HAL to start the data transfer.
+ *
+ * Populates the read/write/close function pointers in the controller
+ * structure. This API should always return NvSuccess.
+ *
+ * @param c OWR controller structure.
+ * */
+NvError AP20RmOwrOpen(NvRmOwrController *c);
+
+#endif // INCLUDED_NVRM_OWR_PRIVATE_H
+
+