Diffstat (limited to 'drivers/clocksource')
 drivers/clocksource/Kconfig             |   8
 drivers/clocksource/Makefile            |   2
 drivers/clocksource/cadence_ttc_timer.c | 436
 drivers/clocksource/em_sti.c            |  13
 drivers/clocksource/exynos_mct.c        | 568
 drivers/clocksource/sh_cmt.c            | 189
 drivers/clocksource/sh_mtu2.c           |   2
 drivers/clocksource/sh_tmu.c            |   2
 8 files changed, 1146 insertions(+), 74 deletions(-)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9002185a0a1a..7bc6e51757ee 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -31,6 +31,9 @@ config SUN4I_TIMER
config VT8500_TIMER
bool
+config CADENCE_TTC_TIMER
+ bool
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -67,3 +70,8 @@ config CLKSRC_METAG_GENERIC
def_bool y if METAG
help
This option enables support for the Meta per-thread timers.
+
+config CLKSRC_EXYNOS_MCT
+ def_bool y if ARCH_EXYNOS
+ help
+ Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 682d48d08164..caacdb63aff9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
+obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
+obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
new file mode 100644
index 000000000000..685bc60e210a
--- /dev/null
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -0,0 +1,436 @@
+/*
+ * This file contains driver for the Cadence Triple Timer Counter Rev 06
+ *
+ * Copyright (C) 2011-2013 Xilinx
+ *
+ * based on arch/mips/kernel/time.c timer driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+/*
+ * This driver configures two of the three 16-bit count-up timers as follows:
+ *
+ * T1: Timer 1, clocksource for generic timekeeping
+ * T2: Timer 2, clockevent source for hrtimers
+ * T3: Timer 3, <unused>
+ *
+ * The input frequency to the timer module for emulation is 2.5 MHz, which is
+ * common to all the timer channels (T1, T2, and T3). With the pre-scaler of
+ * 2048 used below, the timers are clocked at roughly 1.22 kHz (819.2 us
+ * resolution).
+ *
+ * The input frequency to the timer module in silicon is configurable and
+ * obtained from the device tree. The same pre-scaler of 2048 is used.
+ */
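+/*
+ * For example, with an (assumed) 133 MHz input clock, the pre-scaler of
+ * 2048 gives a tick of 2048 / 133 MHz ~= 15.4 us, so the 16-bit counter
+ * wraps roughly once per second, and at HZ = 100 the periodic clockevent
+ * interval clk / (PRESCALE * HZ) works out to ~649 ticks.
+ */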
+
+/*
+ * Timer Register Offset Definitions of Timer 1, Increment base address by 4
+ * and use same offsets for Timer 2
+ */
+#define TTC_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */
+#define TTC_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */
+#define TTC_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */
+#define TTC_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */
+#define TTC_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */
+#define TTC_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */
+
+#define TTC_CNT_CNTRL_DISABLE_MASK 0x1
+
+/*
+ * Setup the timers to use pre-scaling, using a fixed value for now that
+ * should work across most input frequencies, though it may need to be made
+ * more dynamic.
+ */
+#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
+#define PRESCALE 2048 /* The exponent must match this */
+#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1)
+#define CLK_CNTRL_PRESCALE_EN 1
+#define CNT_CNTRL_RESET (1 << 4)
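+/*
+ * Note: the prescale field stores the exponent minus one (the hardware
+ * divides by 2^(N + 1)), and it sits above the enable bit, hence the
+ * (PRESCALE_EXPONENT - 1) << 1 encoding of CLK_CNTRL_PRESCALE above.
+ */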
+
+/**
+ * struct ttc_timer - This definition defines local timer structure
+ *
+ * @base_addr: Base address of timer
+ * @clk: Associated clock source
+ * @clk_rate_change_nb: Notifier block for clock rate changes
+ */
+struct ttc_timer {
+ void __iomem *base_addr;
+ struct clk *clk;
+ struct notifier_block clk_rate_change_nb;
+};
+
+#define to_ttc_timer(x) \
+ container_of(x, struct ttc_timer, clk_rate_change_nb)
+
+struct ttc_timer_clocksource {
+ struct ttc_timer ttc;
+ struct clocksource cs;
+};
+
+#define to_ttc_timer_clksrc(x) \
+ container_of(x, struct ttc_timer_clocksource, cs)
+
+struct ttc_timer_clockevent {
+ struct ttc_timer ttc;
+ struct clock_event_device ce;
+};
+
+#define to_ttc_timer_clkevent(x) \
+ container_of(x, struct ttc_timer_clockevent, ce)
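+/*
+ * The framework callbacks only receive pointers to the embedded
+ * clocksource/clock_event_device structs; the container_of()-based
+ * helpers above recover the enclosing wrapper, e.g.:
+ *
+ * struct ttc_timer *timer = &to_ttc_timer_clkevent(evt)->ttc;
+ */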
+
+/**
+ * ttc_set_interval - Set the timer interval value
+ *
+ * @timer: Pointer to the timer instance
+ * @cycles: Timer interval ticks
+ **/
+static void ttc_set_interval(struct ttc_timer *timer,
+ unsigned long cycles)
+{
+ u32 ctrl_reg;
+
+ /* Disable the counter, set the counter value and re-enable counter */
+ ctrl_reg = __raw_readl(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+
+ __raw_writel(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
+
+ /*
+ * Reset the counter (0x10) so that it starts from 0; this is
+ * needed in one-shot mode for the timing to be correct.
+ */
+ ctrl_reg |= CNT_CNTRL_RESET;
+ ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+}
+
+/**
+ * ttc_clock_event_interrupt - Clock event timer interrupt handler
+ *
+ * @irq: IRQ number of the Timer
+ * @dev_id: void pointer to the ttc_timer instance
+ *
+ * returns: Always IRQ_HANDLED - success
+ **/
+static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
+{
+ struct ttc_timer_clockevent *ttce = dev_id;
+ struct ttc_timer *timer = &ttce->ttc;
+
+ /* Acknowledge the interrupt and call event handler */
+ __raw_readl(timer->base_addr + TTC_ISR_OFFSET);
+
+ ttce->ce.event_handler(&ttce->ce);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __ttc_clocksource_read - Reads the timer counter register
+ *
+ * @cs: Address of the clocksource instance
+ *
+ * returns: Current timer counter register value
+ **/
+static cycle_t __ttc_clocksource_read(struct clocksource *cs)
+{
+ struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
+
+ return (cycle_t)__raw_readl(timer->base_addr +
+ TTC_COUNT_VAL_OFFSET);
+}
+
+/**
+ * ttc_set_next_event - Sets the time interval for next event
+ *
+ * @cycles: Timer interval ticks
+ * @evt: Address of clock event instance
+ *
+ * returns: Always 0 - success
+ **/
+static int ttc_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+
+ ttc_set_interval(timer, cycles);
+ return 0;
+}
+
+/**
+ * ttc_set_mode - Sets the mode of timer
+ *
+ * @mode: Mode to be set
+ * @evt: Address of clock event instance
+ **/
+static void ttc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+ u32 ctrl_reg;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ttc_set_interval(timer,
+ DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
+ PRESCALE * HZ));
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl_reg = __raw_readl(timer->base_addr +
+ TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg,
+ timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ ctrl_reg = __raw_readl(timer->base_addr +
+ TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ __raw_writel(ctrl_reg,
+ timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ break;
+ }
+}
+
+static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct ttc_timer *ttc = to_ttc_timer(nb);
+ struct ttc_timer_clocksource *ttccs = container_of(ttc,
+ struct ttc_timer_clocksource, ttc);
+
+ switch (event) {
+ case POST_RATE_CHANGE:
+ /*
+ * Do whatever is necessary to maintain a proper time base
+ *
+ * I cannot find a way to adjust the currently used clocksource
+ * to the new frequency. __clocksource_updatefreq_hz() sounds
+ * promising, but does not work; it is unclear what is missing.
+ *
+ * This approach works, but triggers two clocksource switches:
+ * the first, after the unregister, falls back to the jiffies
+ * clocksource; the second, after the re-register, switches to
+ * the newly registered timer.
+ *
+ * Alternatively we could 'waste' another HW timer to ping-pong
+ * between clock sources. That would also need one register and
+ * one unregister call, but would trigger only one clocksource
+ * switch, at the cost of another HW timer used by the OS.
+ */
+ clocksource_unregister(&ttccs->cs);
+ clocksource_register_hz(&ttccs->cs,
+ ndata->new_rate / PRESCALE);
+ /* fall through */
+ case PRE_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
+{
+ struct ttc_timer_clocksource *ttccs;
+ int err;
+
+ ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
+ if (WARN_ON(!ttccs))
+ return;
+
+ ttccs->ttc.clk = clk;
+
+ err = clk_prepare_enable(ttccs->ttc.clk);
+ if (WARN_ON(err)) {
+ kfree(ttccs);
+ return;
+ }
+
+ ttccs->ttc.clk_rate_change_nb.notifier_call =
+ ttc_rate_change_clocksource_cb;
+ ttccs->ttc.clk_rate_change_nb.next = NULL;
+ if (clk_notifier_register(ttccs->ttc.clk,
+ &ttccs->ttc.clk_rate_change_nb))
+ pr_warn("Unable to register clock notifier.\n");
+
+ ttccs->ttc.base_addr = base;
+ ttccs->cs.name = "ttc_clocksource";
+ ttccs->cs.rating = 200;
+ ttccs->cs.read = __ttc_clocksource_read;
+ ttccs->cs.mask = CLOCKSOURCE_MASK(16);
+ ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /*
+ * Setup the clock source counter to be an incrementing counter
+ * with no interrupt, rolling over at 0xFFFF and pre-scaled by
+ * 2048. Let it start running now.
+ */
+ __raw_writel(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ __raw_writel(CNT_CNTRL_RESET,
+ ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+
+ err = clocksource_register_hz(&ttccs->cs,
+ clk_get_rate(ttccs->ttc.clk) / PRESCALE);
+ if (WARN_ON(err)) {
+ kfree(ttccs);
+ return;
+ }
+}
+
+static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct ttc_timer *ttc = to_ttc_timer(nb);
+ struct ttc_timer_clockevent *ttcce = container_of(ttc,
+ struct ttc_timer_clockevent, ttc);
+
+ switch (event) {
+ case POST_RATE_CHANGE:
+ {
+ unsigned long flags;
+
+ /*
+ * clockevents_update_freq should be called with IRQs disabled
+ * on the CPU the timer provides events for. The timer we use
+ * is common to both CPUs; it is not clear whether this needs
+ * to run on both cores.
+ */
+ local_irq_save(flags);
+ clockevents_update_freq(&ttcce->ce,
+ ndata->new_rate / PRESCALE);
+ local_irq_restore(flags);
+
+ /* fall through */
+ }
+ case PRE_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static void __init ttc_setup_clockevent(struct clk *clk,
+ void __iomem *base, u32 irq)
+{
+ struct ttc_timer_clockevent *ttcce;
+ int err;
+
+ ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
+ if (WARN_ON(!ttcce))
+ return;
+
+ ttcce->ttc.clk = clk;
+
+ err = clk_prepare_enable(ttcce->ttc.clk);
+ if (WARN_ON(err)) {
+ kfree(ttcce);
+ return;
+ }
+
+ ttcce->ttc.clk_rate_change_nb.notifier_call =
+ ttc_rate_change_clockevent_cb;
+ ttcce->ttc.clk_rate_change_nb.next = NULL;
+ if (clk_notifier_register(ttcce->ttc.clk,
+ &ttcce->ttc.clk_rate_change_nb))
+ pr_warn("Unable to register clock notifier.\n");
+
+ ttcce->ttc.base_addr = base;
+ ttcce->ce.name = "ttc_clockevent";
+ ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ttcce->ce.set_next_event = ttc_set_next_event;
+ ttcce->ce.set_mode = ttc_set_mode;
+ ttcce->ce.rating = 200;
+ ttcce->ce.irq = irq;
+ ttcce->ce.cpumask = cpu_possible_mask;
+
+ /*
+ * Setup the clock event timer to be an interval timer which
+ * is prescaled by 2048, using the interval interrupt. Leave
+ * it disabled for now (0x23: counter disabled, interval mode,
+ * output waveform disabled).
+ */
+ __raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
+
+ err = request_irq(irq, ttc_clock_event_interrupt,
+ IRQF_DISABLED | IRQF_TIMER,
+ ttcce->ce.name, ttcce);
+ if (WARN_ON(err)) {
+ kfree(ttcce);
+ return;
+ }
+
+ clockevents_config_and_register(&ttcce->ce,
+ clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
+}
+
+/**
+ * ttc_timer_init - Initialize the timer
+ *
+ * @timer: Pointer to the timer device tree node
+ *
+ * Initializes the timer hardware and registers the clocksource and clockevent
+ * timers with the Linux kernel timer framework.
+ */
+static void __init ttc_timer_init(struct device_node *timer)
+{
+ unsigned int irq;
+ void __iomem *timer_baseaddr;
+ struct clk *clk;
+ static int initialized;
+
+ if (initialized)
+ return;
+
+ initialized = 1;
+
+ /*
+ * Get the 1st Triple Timer Counter (TTC) block from the device tree
+ * and use it. Note that the clock event timer is the 2nd timer in the
+ * block and is the one that uses the interrupt, hence the
+ * irq_of_parse_and_map(timer, 1).
+ */
+ timer_baseaddr = of_iomap(timer, 0);
+ if (!timer_baseaddr) {
+ pr_err("ERROR: invalid timer base address\n");
+ BUG();
+ }
+
+ irq = irq_of_parse_and_map(timer, 1);
+ if (!irq) {
+ pr_err("ERROR: invalid interrupt number\n");
+ BUG();
+ }
+
+ clk = of_clk_get_by_name(timer, "cpu_1x");
+ if (IS_ERR(clk)) {
+ pr_err("ERROR: timer input clock not found\n");
+ BUG();
+ }
+
+ ttc_setup_clocksource(clk, timer_baseaddr);
+ ttc_setup_clockevent(clk, timer_baseaddr + 4, irq);
+
+ pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+}
+
+CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
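
The CLOCKSOURCE_OF_DECLARE() line above ties ttc_timer_init() to the
"cdns,ttc" compatible string. As a rough sketch (the exact definition lives
in include/linux/clocksource.h and may differ between kernel versions), the
macro places an of_device_id entry into a dedicated linker section that
clocksource_of_init() walks at boot, calling the init function for each
matching device tree node:

    #define CLOCKSOURCE_OF_DECLARE(name, compat, fn)                   \
            static const struct of_device_id __clksrc_of_table_##name  \
            __used __section(__clksrc_of_table)                        \
            = { .compatible = compat, .data = fn };
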
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index e6a553cb73e8..4329a29a5310 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -399,7 +399,18 @@ static struct platform_driver em_sti_device_driver = {
}
};
-module_platform_driver(em_sti_device_driver);
+static int __init em_sti_init(void)
+{
+ return platform_driver_register(&em_sti_device_driver);
+}
+
+static void __exit em_sti_exit(void)
+{
+ platform_driver_unregister(&em_sti_device_driver);
+}
+
+subsys_initcall(em_sti_init);
+module_exit(em_sti_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
new file mode 100644
index 000000000000..661026834b23
--- /dev/null
+++ b/drivers/clocksource/exynos_mct.c
@@ -0,0 +1,568 @@
+/* linux/drivers/clocksource/exynos_mct.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 MCT(Multi-Core Timer) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clocksource.h>
+
+#include <asm/arch_timer.h>
+#include <asm/localtimer.h>
+
+#include <plat/cpu.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+#include <asm/mach/time.h>
+
+#define EXYNOS4_MCTREG(x) (x)
+#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
+#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
+#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
+#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
+#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
+#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
+#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
+#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
+#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
+#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
+#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
+#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
+#define EXYNOS4_MCT_L_MASK (0xffffff00)
+
+#define MCT_L_TCNTB_OFFSET (0x00)
+#define MCT_L_ICNTB_OFFSET (0x08)
+#define MCT_L_TCON_OFFSET (0x20)
+#define MCT_L_INT_CSTAT_OFFSET (0x30)
+#define MCT_L_INT_ENB_OFFSET (0x34)
+#define MCT_L_WSTAT_OFFSET (0x40)
+#define MCT_G_TCON_START (1 << 8)
+#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
+#define MCT_G_TCON_COMP0_ENABLE (1 << 0)
+#define MCT_L_TCON_INTERVAL_MODE (1 << 2)
+#define MCT_L_TCON_INT_START (1 << 1)
+#define MCT_L_TCON_TIMER_START (1 << 0)
+
+#define TICK_BASE_CNT 1
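+/*
+ * The per-cpu tick timers load TICK_BASE_CNT into TCNTB, so one
+ * interrupt-counter decrement takes TICK_BASE_CNT + 1 input clocks;
+ * this is why the local clockevents are registered at
+ * clk_rate / (TICK_BASE_CNT + 1) below.
+ */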
+
+enum {
+ MCT_INT_SPI,
+ MCT_INT_PPI
+};
+
+enum {
+ MCT_G0_IRQ,
+ MCT_G1_IRQ,
+ MCT_G2_IRQ,
+ MCT_G3_IRQ,
+ MCT_L0_IRQ,
+ MCT_L1_IRQ,
+ MCT_L2_IRQ,
+ MCT_L3_IRQ,
+ MCT_NR_IRQS,
+};
+
+static void __iomem *reg_base;
+static unsigned long clk_rate;
+static unsigned int mct_int_type;
+static int mct_irqs[MCT_NR_IRQS];
+
+struct mct_clock_event_device {
+ struct clock_event_device *evt;
+ unsigned long base;
+ char name[10];
+};
+
+static void exynos4_mct_write(unsigned int value, unsigned long offset)
+{
+ unsigned long stat_addr;
+ u32 mask;
+ u32 i;
+
+ __raw_writel(value, reg_base + offset);
+
+ if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
+ stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & EXYNOS4_MCT_L_MASK) {
+ case MCT_L_TCON_OFFSET:
+ mask = 1 << 3; /* L_TCON write status */
+ break;
+ case MCT_L_ICNTB_OFFSET:
+ mask = 1 << 1; /* L_ICNTB write status */
+ break;
+ case MCT_L_TCNTB_OFFSET:
+ mask = 1 << 0; /* L_TCNTB write status */
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (offset) {
+ case EXYNOS4_MCT_G_TCON:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 16; /* G_TCON write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_L:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 0; /* G_COMP0_L write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_U:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 1; /* G_COMP0_U write status */
+ break;
+ case EXYNOS4_MCT_G_COMP0_ADD_INCR:
+ stat_addr = EXYNOS4_MCT_G_WSTAT;
+ mask = 1 << 2; /* G_COMP0_ADD_INCR w status */
+ break;
+ case EXYNOS4_MCT_G_CNT_L:
+ stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
+ mask = 1 << 0; /* G_CNT_L write status */
+ break;
+ case EXYNOS4_MCT_G_CNT_U:
+ stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
+ mask = 1 << 1; /* G_CNT_U write status */
+ break;
+ default:
+ return;
+ }
+ }
+
+ /* Wait maximum 1 ms until written values are applied */
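+ /* (loops_per_jiffy * HZ approximates delay-loop iterations per second,
+ * so loops_per_jiffy / 1000 * HZ is roughly one millisecond's worth) */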
+ for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
+ if (__raw_readl(reg_base + stat_addr) & mask) {
+ __raw_writel(mask, reg_base + stat_addr);
+ return;
+ }
+
+ panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
+}
+
+/* Clocksource handling */
+static void exynos4_mct_frc_start(u32 hi, u32 lo)
+{
+ u32 reg;
+
+ exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
+ exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
+
+ reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ reg |= MCT_G_TCON_START;
+ exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
+}
+
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+ unsigned int lo, hi;
+ u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+
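+ /* 64-bit read: re-read until the upper word is stable across the
+ * lower-word read, so that hi/lo form a consistent snapshot */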
+ do {
+ hi = hi2;
+ lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
+ hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+ } while (hi != hi2);
+
+ return ((cycle_t)hi << 32) | lo;
+}
+
+static void exynos4_frc_resume(struct clocksource *cs)
+{
+ exynos4_mct_frc_start(0, 0);
+}
+
+struct clocksource mct_frc = {
+ .name = "mct-frc",
+ .rating = 400,
+ .read = exynos4_frc_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .resume = exynos4_frc_resume,
+};
+
+static void __init exynos4_clocksource_init(void)
+{
+ exynos4_mct_frc_start(0, 0);
+
+ if (clocksource_register_hz(&mct_frc, clk_rate))
+ panic("%s: can't register clocksource\n", mct_frc.name);
+}
+
+static void exynos4_mct_comp0_stop(void)
+{
+ unsigned int tcon;
+
+ tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+ tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
+
+ exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
+ exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
+}
+
+static void exynos4_mct_comp0_start(enum clock_event_mode mode,
+ unsigned long cycles)
+{
+ unsigned int tcon;
+ cycle_t comp_cycle;
+
+ tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ tcon |= MCT_G_TCON_COMP0_AUTO_INC;
+ exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
+ }
+
+ comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+ exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
+ exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
+
+ exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);
+
+ tcon |= MCT_G_TCON_COMP0_ENABLE;
+ exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
+}
+
+static int exynos4_comp_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ exynos4_mct_comp0_start(evt->mode, cycles);
+
+ return 0;
+}
+
+static void exynos4_comp_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long cycles_per_jiffy;
+
+ exynos4_mct_comp0_stop();
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
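+ /* one jiffy converted to timer cycles via the clockevent's
+ * mult/shift pair: cycles = (ns * mult) >> shift */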
+ cycles_per_jiffy =
+ (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
+ exynos4_mct_comp0_start(mode, cycles_per_jiffy);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device mct_comp_device = {
+ .name = "mct-comp",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 250,
+ .set_next_event = exynos4_comp_set_next_event,
+ .set_mode = exynos4_comp_set_mode,
+};
+
+static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mct_comp_event_irq = {
+ .name = "mct_comp_irq",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = exynos4_mct_comp_isr,
+ .dev_id = &mct_comp_device,
+};
+
+static void exynos4_clockevent_init(void)
+{
+ mct_comp_device.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&mct_comp_device, clk_rate,
+ 0xf, 0xffffffff);
+ setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+}
+
+#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
+/* Clock event handling */
+static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
+{
+ unsigned long tmp;
+ unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
+ unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
+
+ tmp = __raw_readl(reg_base + offset);
+ if (tmp & mask) {
+ tmp &= ~mask;
+ exynos4_mct_write(tmp, offset);
+ }
+}
+
+static void exynos4_mct_tick_start(unsigned long cycles,
+ struct mct_clock_event_device *mevt)
+{
+ unsigned long tmp;
+
+ exynos4_mct_tick_stop(mevt);
+
+ tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */
+
+ /* update interrupt count buffer */
+ exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);
+
+ /* enable MCT tick interrupt */
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
+
+ tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+ tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
+ MCT_L_TCON_INTERVAL_MODE;
+ exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
+}
+
+static int exynos4_tick_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+
+ exynos4_mct_tick_start(cycles, mevt);
+
+ return 0;
+}
+
+static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+ unsigned long cycles_per_jiffy;
+
+ exynos4_mct_tick_stop(mevt);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ cycles_per_jiffy =
+ (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
+ exynos4_mct_tick_start(cycles_per_jiffy, mevt);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+ struct clock_event_device *evt = mevt->evt;
+
+ /*
+ * This is for supporting oneshot mode.
+ * The MCT would keep generating interrupts periodically
+ * unless it is stopped explicitly.
+ */
+ if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
+ exynos4_mct_tick_stop(mevt);
+
+ /* Clear the MCT tick interrupt */
+ if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
+{
+ struct mct_clock_event_device *mevt = dev_id;
+ struct clock_event_device *evt = mevt->evt;
+
+ exynos4_mct_tick_clear(mevt);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mct_tick0_event_irq = {
+ .name = "mct_tick0_irq",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = exynos4_mct_tick_isr,
+};
+
+static struct irqaction mct_tick1_event_irq = {
+ .name = "mct_tick1_irq",
+ .flags = IRQF_TIMER | IRQF_NOBALANCING,
+ .handler = exynos4_mct_tick_isr,
+};
+
+static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
+{
+ struct mct_clock_event_device *mevt;
+ unsigned int cpu = smp_processor_id();
+
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ mevt->evt = evt;
+
+ mevt->base = EXYNOS4_MCT_L_BASE(cpu);
+ sprintf(mevt->name, "mct_tick%d", cpu);
+
+ evt->name = mevt->name;
+ evt->cpumask = cpumask_of(cpu);
+ evt->set_next_event = exynos4_tick_set_next_event;
+ evt->set_mode = exynos4_tick_set_mode;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 450;
+ clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+ 0xf, 0x7fffffff);
+
+ exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+
+ if (mct_int_type == MCT_INT_SPI) {
+ if (cpu == 0) {
+ mct_tick0_event_irq.dev_id = mevt;
+ evt->irq = mct_irqs[MCT_L0_IRQ];
+ setup_irq(evt->irq, &mct_tick0_event_irq);
+ } else {
+ mct_tick1_event_irq.dev_id = mevt;
+ evt->irq = mct_irqs[MCT_L1_IRQ];
+ setup_irq(evt->irq, &mct_tick1_event_irq);
+ irq_set_affinity(evt->irq, cpumask_of(1));
+ }
+ } else {
+ enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
+ }
+
+ return 0;
+}
+
+static void exynos4_local_timer_stop(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ if (mct_int_type == MCT_INT_SPI) {
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
+ } else {
+ disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ }
+}
+
+static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
+ .setup = exynos4_local_timer_setup,
+ .stop = exynos4_local_timer_stop,
+};
+#endif /* CONFIG_LOCAL_TIMERS */
+
+static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+{
+ struct clk *mct_clk, *tick_clk;
+
+ tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
+ clk_get(NULL, "fin_pll");
+ if (IS_ERR(tick_clk))
+ panic("%s: unable to determine tick clock rate\n", __func__);
+ clk_rate = clk_get_rate(tick_clk);
+
+ mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
+ if (IS_ERR(mct_clk))
+ panic("%s: unable to retrieve mct clock instance\n", __func__);
+ clk_prepare_enable(mct_clk);
+
+ reg_base = base;
+ if (!reg_base)
+ panic("%s: unable to ioremap mct address space\n", __func__);
+
+#ifdef CONFIG_LOCAL_TIMERS
+ if (mct_int_type == MCT_INT_PPI) {
+ int err;
+
+ err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
+ exynos4_mct_tick_isr, "MCT",
+ &percpu_mct_tick);
+ WARN(err, "MCT: can't request IRQ %d (%d)\n",
+ mct_irqs[MCT_L0_IRQ], err);
+ }
+
+ local_timer_register(&exynos4_mct_tick_ops);
+#endif /* CONFIG_LOCAL_TIMERS */
+}
+
+void __init mct_init(void)
+{
+ if (soc_is_exynos4210()) {
+ mct_irqs[MCT_G0_IRQ] = EXYNOS4_IRQ_MCT_G0;
+ mct_irqs[MCT_L0_IRQ] = EXYNOS4_IRQ_MCT_L0;
+ mct_irqs[MCT_L1_IRQ] = EXYNOS4_IRQ_MCT_L1;
+ mct_int_type = MCT_INT_SPI;
+ } else {
+ panic("unable to determine mct controller type\n");
+ }
+
+ exynos4_timer_resources(NULL, S5P_VA_SYSTIMER);
+ exynos4_clocksource_init();
+ exynos4_clockevent_init();
+}
+
+static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+{
+ u32 nr_irqs, i;
+
+ mct_int_type = int_type;
+
+ /* This driver uses only one global timer interrupt */
+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
+
+ /*
+ * Find out the number of local irqs specified. The local
+ * timer irqs are specified after the four global timer
+ * irqs are specified.
+ */
+#ifdef CONFIG_OF
+ nr_irqs = of_irq_count(np);
+#else
+ nr_irqs = 0;
+#endif
+ for (i = MCT_L0_IRQ; i < nr_irqs; i++)
+ mct_irqs[i] = irq_of_parse_and_map(np, i);
+
+ exynos4_timer_resources(np, of_iomap(np, 0));
+ exynos4_clocksource_init();
+ exynos4_clockevent_init();
+}
+
+static void __init mct_init_spi(struct device_node *np)
+{
+ return mct_init_dt(np, MCT_INT_SPI);
+}
+
+static void __init mct_init_ppi(struct device_node *np)
+{
+ return mct_init_dt(np, MCT_INT_PPI);
+}
+CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
+CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 488c14cc8dbf..08d0c418c94a 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -54,62 +54,100 @@ struct sh_cmt_priv {
struct clocksource cs;
unsigned long total_cycles;
bool cs_enabled;
+
+ /* callbacks for CMSTR and CMCSR access */
+ unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ void (*write_control)(void __iomem *base, unsigned long offs,
+ unsigned long value);
+
+ /* callbacks for CMCNT and CMCOR access */
+ unsigned long (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs,
+ unsigned long value);
};
-static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
+/* Examples of supported CMT timer register layouts and I/O access widths:
+ *
+ * "16-bit counter and 16-bit control" as found on sh7263:
+ * CMSTR 0xfffec000 16-bit
+ * CMCSR 0xfffec002 16-bit
+ * CMCNT 0xfffec004 16-bit
+ * CMCOR 0xfffec006 16-bit
+ *
+ * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
+ * CMSTR 0xffca0000 16-bit
+ * CMCSR 0xffca0060 16-bit
+ * CMCNT 0xffca0064 32-bit
+ * CMCOR 0xffca0068 32-bit
+ */
+
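+/*
+ * The channel registers are packed 2 bytes apart when accessed as
+ * 16-bit quantities and 4 bytes apart when accessed as 32-bit
+ * quantities, hence the (offs << 1) and (offs << 2) scaling in the
+ * accessors below.
+ */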
+static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
+{
+ return ioread16(base + (offs << 1));
+}
+
+static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
+{
+ return ioread32(base + (offs << 2));
+}
+
+static void sh_cmt_write16(void __iomem *base, unsigned long offs,
+ unsigned long value)
+{
+ iowrite16(value, base + (offs << 1));
+}
+
+static void sh_cmt_write32(void __iomem *base, unsigned long offs,
+ unsigned long value)
+{
+ iowrite32(value, base + (offs << 2));
+}
-#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
+static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
- unsigned long offs;
-
- if (reg_nr == CMSTR) {
- offs = 0;
- base -= cfg->channel_offset;
- } else
- offs = reg_nr;
-
- if (p->width == 16)
- offs <<= 1;
- else {
- offs <<= 2;
- if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
- return ioread32(base + offs);
- }
- return ioread16(base + offs);
+ return p->read_control(p->mapbase - cfg->channel_offset, 0);
}
-static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
- unsigned long value)
+static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
+{
+ return p->read_control(p->mapbase, CMCSR);
+}
+
+static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
+{
+ return p->read_count(p->mapbase, CMCNT);
+}
+
+static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
+ unsigned long value)
{
struct sh_timer_config *cfg = p->pdev->dev.platform_data;
- void __iomem *base = p->mapbase;
- unsigned long offs;
-
- if (reg_nr == CMSTR) {
- offs = 0;
- base -= cfg->channel_offset;
- } else
- offs = reg_nr;
-
- if (p->width == 16)
- offs <<= 1;
- else {
- offs <<= 2;
- if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
- iowrite32(value, base + offs);
- return;
- }
- }
- iowrite16(value, base + offs);
+ p->write_control(p->mapbase - cfg->channel_offset, 0, value);
+}
+
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_control(p->mapbase, CMCSR, value);
+}
+
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_count(p->mapbase, CMCNT, value);
+}
+
+static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
+ unsigned long value)
+{
+ p->write_count(p->mapbase, CMCOR, value);
}
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
@@ -118,15 +156,15 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
unsigned long v1, v2, v3;
int o1, o2;
- o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
o2 = o1;
- v1 = sh_cmt_read(p, CMCNT);
- v2 = sh_cmt_read(p, CMCNT);
- v3 = sh_cmt_read(p, CMCNT);
- o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ v1 = sh_cmt_read_cmcnt(p);
+ v2 = sh_cmt_read_cmcnt(p);
+ v3 = sh_cmt_read_cmcnt(p);
+ o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
@@ -134,6 +172,7 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
return v2;
}
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
@@ -142,14 +181,14 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&sh_cmt_lock, flags);
- value = sh_cmt_read(p, CMSTR);
+ value = sh_cmt_read_cmstr(p);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
- sh_cmt_write(p, CMSTR, value);
+ sh_cmt_write_cmstr(p, value);
raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
@@ -173,14 +212,14 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
/* configure channel, periodic mode and maximum timeout */
if (p->width == 16) {
*rate = clk_get_rate(p->clk) / 512;
- sh_cmt_write(p, CMCSR, 0x43);
+ sh_cmt_write_cmcsr(p, 0x43);
} else {
*rate = clk_get_rate(p->clk) / 8;
- sh_cmt_write(p, CMCSR, 0x01a4);
+ sh_cmt_write_cmcsr(p, 0x01a4);
}
- sh_cmt_write(p, CMCOR, 0xffffffff);
- sh_cmt_write(p, CMCNT, 0);
+ sh_cmt_write_cmcor(p, 0xffffffff);
+ sh_cmt_write_cmcnt(p, 0);
/*
* According to the sh73a0 user's manual, as CMCNT can be operated
@@ -194,12 +233,12 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
* take RCLKx2 at maximum.
*/
for (k = 0; k < 100; k++) {
- if (!sh_cmt_read(p, CMCNT))
+ if (!sh_cmt_read_cmcnt(p))
break;
udelay(1);
}
- if (sh_cmt_read(p, CMCNT)) {
+ if (sh_cmt_read_cmcnt(p)) {
dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
ret = -ETIMEDOUT;
goto err1;
@@ -222,7 +261,7 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
sh_cmt_start_stop_ch(p, 0);
/* disable interrupts in CMT block */
- sh_cmt_write(p, CMCSR, 0);
+ sh_cmt_write_cmcsr(p, 0);
/* stop clock */
clk_disable(p->clk);
@@ -270,7 +309,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
if (new_match > p->max_match_value)
new_match = p->max_match_value;
- sh_cmt_write(p, CMCOR, new_match);
+ sh_cmt_write_cmcor(p, new_match);
now = sh_cmt_get_counter(p, &has_wrapped);
if (has_wrapped && (new_match > p->match_value)) {
@@ -346,7 +385,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
struct sh_cmt_priv *p = dev_id;
/* clear flags */
- sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);
+ sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
/* update clock source counter to begin with if enabled
* the wrap flag should be cleared by the timer specific
@@ -625,14 +664,6 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
unsigned long clockevent_rating,
unsigned long clocksource_rating)
{
- if (p->width == (sizeof(p->max_match_value) * 8))
- p->max_match_value = ~0;
- else
- p->max_match_value = (1 << p->width) - 1;
-
- p->match_value = p->max_match_value;
- raw_spin_lock_init(&p->lock);
-
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
@@ -657,8 +688,6 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
- platform_set_drvdata(pdev, p);
-
res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
@@ -693,32 +722,51 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err1;
}
+ p->read_control = sh_cmt_read16;
+ p->write_control = sh_cmt_write16;
+
if (resource_size(res) == 6) {
p->width = 16;
+ p->read_count = sh_cmt_read16;
+ p->write_count = sh_cmt_write16;
p->overflow_bit = 0x80;
p->clear_bits = ~0x80;
} else {
p->width = 32;
+ p->read_count = sh_cmt_read32;
+ p->write_count = sh_cmt_write32;
p->overflow_bit = 0x8000;
p->clear_bits = ~0xc000;
}
+ if (p->width == (sizeof(p->max_match_value) * 8))
+ p->max_match_value = ~0;
+ else
+ p->max_match_value = (1 << p->width) - 1;
+
+ p->match_value = p->max_match_value;
+ raw_spin_lock_init(&p->lock);
+
ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err1;
+ goto err2;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err1;
+ goto err2;
}
+ platform_set_drvdata(pdev, p);
+
return 0;
+err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
@@ -751,7 +799,6 @@ static int sh_cmt_probe(struct platform_device *pdev)
ret = sh_cmt_setup(p, pdev);
if (ret) {
kfree(p);
- platform_set_drvdata(pdev, NULL);
pm_runtime_idle(&pdev->dev);
return ret;
}
@@ -791,7 +838,7 @@ static void __exit sh_cmt_exit(void)
}
early_platform_init("earlytimer", &sh_cmt_device_driver);
-module_init(sh_cmt_init);
+subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 83943e27cfac..4aac9ee0d0c0 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -386,7 +386,7 @@ static void __exit sh_mtu2_exit(void)
}
early_platform_init("earlytimer", &sh_mtu2_device_driver);
-module_init(sh_mtu2_init);
+subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b4502edce2a1..78b8dae49628 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -549,7 +549,7 @@ static void __exit sh_tmu_exit(void)
}
early_platform_init("earlytimer", &sh_tmu_device_driver);
-module_init(sh_tmu_init);
+subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");