Diffstat (limited to 'arch/sh/drivers/dma')
-rw-r--r--  arch/sh/drivers/dma/Kconfig       55
-rw-r--r--  arch/sh/drivers/dma/Makefile       9
-rw-r--r--  arch/sh/drivers/dma/dma-api.c    292
-rw-r--r--  arch/sh/drivers/dma/dma-g2.c     171
-rw-r--r--  arch/sh/drivers/dma/dma-isa.c    106
-rw-r--r--  arch/sh/drivers/dma/dma-pvr2.c   109
-rw-r--r--  arch/sh/drivers/dma/dma-sh.c     267
-rw-r--r--  arch/sh/drivers/dma/dma-sh.h      52
-rw-r--r--  arch/sh/drivers/dma/dma-sysfs.c  133
9 files changed, 1194 insertions, 0 deletions
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
new file mode 100644
index 000000000000..0f15216cd39d
--- /dev/null
+++ b/arch/sh/drivers/dma/Kconfig
@@ -0,0 +1,55 @@
+menu "DMA support"
+
+config SH_DMA
+ bool "DMA controller (DMAC) support"
+ help
+ Selecting this option will provide the same API as the PC's Direct
+ Memory Access Controller (8237A) for the SuperH DMAC.
+
+ If unsure, say N.
+
+config NR_ONCHIP_DMA_CHANNELS
+ depends on SH_DMA
+ int "Number of on-chip DMAC channels"
+ default "4"
+ help
+ This allows you to specify the number of channels that the on-chip
+ DMAC supports. This will be 4 for SH7750/SH7751 and 8 for the
+ SH7750R/SH7751R.
+
+config NR_DMA_CHANNELS_BOOL
+ depends on SH_DMA
+ bool "Override default number of maximum DMA channels"
+ help
+ This allows you to forcibly update the maximum number of supported
+ DMA channels for a given board. If this is unset, this will default
+ to the number of channels that the on-chip DMAC has.
+
+config NR_DMA_CHANNELS
+ int "Maximum number of DMA channels"
+ depends on SH_DMA && NR_DMA_CHANNELS_BOOL
+ default NR_ONCHIP_DMA_CHANNELS
+ help
+ This allows you to specify the maximum number of DMA channels to
+ support. Setting this to a higher value allows for cascading DMACs
+ with additional channels.
+
+config DMA_PAGE_OPS
+ bool "Use DMAC for page copy/clear"
+ depends on SH_DMA && BROKEN
+ help
+ Selecting this option will use a dual-address mode configured channel
+ in the SH DMAC for copy_page()/clear_page(). Primarily a performance
+ hack.
+
+config DMA_PAGE_OPS_CHANNEL
+ depends on DMA_PAGE_OPS
+ int "DMA channel for sh memory-manager page copy/clear"
+ default "3"
+ help
+ This allows the specification of the dual-address DMA channel,
+ in case channel 3 is unavailable. On the SH-4, channels 1, 2 and 3
+ are dual-address capable.
+
+endmenu
+
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
new file mode 100644
index 000000000000..065d4c90970e
--- /dev/null
+++ b/arch/sh/drivers/dma/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the SuperH DMA specific kernel interface routines under Linux.
+#
+
+obj-y += dma-api.o dma-isa.o
+obj-$(CONFIG_SYSFS) += dma-sysfs.o
+obj-$(CONFIG_SH_DMA) += dma-sh.o
+obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o
+
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
new file mode 100644
index 000000000000..96e3036ec2bb
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -0,0 +1,292 @@
+/*
+ * arch/sh/drivers/dma/dma-api.c
+ *
+ * SuperH-specific DMA management API
+ *
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <asm/dma.h>
+
+DEFINE_SPINLOCK(dma_spin_lock);
+static LIST_HEAD(registered_dmac_list);
+
+/*
+ * A brief note about the reasons for this API as it stands.
+ *
+ * For starters, the old ISA DMA API didn't work for us for a number of
+ * reasons. For one, the vast majority of channels on the SH DMAC are
+ * dual-address mode only, and both the new and the old DMA APIs are built
+ * around the concept of managing a DMA buffer, which doesn't fit this
+ * model very well. In addition, the new API is largely geared at IOMMUs
+ * and GARTs, and doesn't even support the channel notion very well.
+ *
+ * The other thing that's a marginal issue is the sheer number of random DMA
+ * engines that are present (i.e., in boards like the Dreamcast), some of which
+ * cascade off of the SH DMAC, and others do not. As such, there was a real
+ * need for a scalable subsystem that could deal with both single and
+ * dual-address mode usage, in addition to interoperating with cascaded DMACs.
+ *
+ * There really isn't any reason why this needs to be SH specific, though I'm
+ * not aware of too many other processors (with the exception of some MIPS)
+ * that have the same concept of a dual address mode, or any real desire to
+ * actually make use of the DMAC even if such a subsystem were exposed
+ * elsewhere.
+ *
+ * The idea for this was derived from the ARM port, which acted as an excellent
+ * reference when trying to address these issues.
+ *
+ * It should also be noted that the decision to add Yet Another DMA API(tm) to
+ * the kernel wasn't made easily, and was only decided upon after conferring
+ * with jejb with regards to the state of the old and new APIs as they applied
+ * to these circumstances. Philip Blundell was also a great help in figuring
+ * out some single-address mode DMA semantics that were otherwise rather
+ * confusing.
+ */
+
+struct dma_info *get_dma_info(unsigned int chan)
+{
+ struct list_head *pos, *tmp;
+ unsigned int total = 0;
+
+ /*
+ * Look for each DMAC's range to determine who the owner of
+ * the channel is.
+ */
+ list_for_each_safe(pos, tmp, &registered_dmac_list) {
+ struct dma_info *info = list_entry(pos, struct dma_info, list);
+
+ total += info->nr_channels;
+ if (chan > total)
+ continue;
+
+ return info;
+ }
+
+ return NULL;
+}
+
+struct dma_channel *get_dma_channel(unsigned int chan)
+{
+ struct dma_info *info = get_dma_info(chan);
+
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
+ return info->channels + chan;
+}
+
+int get_dma_residue(unsigned int chan)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ if (info->ops->get_residue)
+ return info->ops->get_residue(channel);
+
+ return 0;
+}
+
+int request_dma(unsigned int chan, const char *dev_id)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ down(&channel->sem);
+
+ if (!info->ops || chan >= MAX_DMA_CHANNELS) {
+ up(&channel->sem);
+ return -EINVAL;
+ }
+
+ atomic_set(&channel->busy, 1);
+
+ strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
+
+ up(&channel->sem);
+
+ if (info->ops->request)
+ return info->ops->request(channel);
+
+ return 0;
+}
+
+void free_dma(unsigned int chan)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ if (info->ops->free)
+ info->ops->free(channel);
+
+ atomic_set(&channel->busy, 0);
+}
+
+void dma_wait_for_completion(unsigned int chan)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ if (channel->flags & DMA_TEI_CAPABLE) {
+ wait_event(channel->wait_queue,
+ (info->ops->get_residue(channel) == 0));
+ return;
+ }
+
+ while (info->ops->get_residue(channel))
+ cpu_relax();
+}
+
+void dma_configure_channel(unsigned int chan, unsigned long flags)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ if (info->ops->configure)
+ info->ops->configure(channel, flags);
+}
+
+int dma_xfer(unsigned int chan, unsigned long from,
+ unsigned long to, size_t size, unsigned int mode)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ channel->sar = from;
+ channel->dar = to;
+ channel->count = size;
+ channel->mode = mode;
+
+ return info->ops->xfer(channel);
+}
+
+#ifdef CONFIG_PROC_FS
+static int dma_read_proc(char *buf, char **start, off_t off,
+ int len, int *eof, void *data)
+{
+ struct list_head *pos, *tmp;
+ char *p = buf;
+
+ if (list_empty(&registered_dmac_list))
+ return 0;
+
+ /*
+ * Iterate over each registered DMAC
+ */
+ list_for_each_safe(pos, tmp, &registered_dmac_list) {
+ struct dma_info *info = list_entry(pos, struct dma_info, list);
+ int i;
+
+ /*
+ * Iterate over each channel
+ */
+ for (i = 0; i < info->nr_channels; i++) {
+ struct dma_channel *channel = info->channels + i;
+
+ if (!(channel->flags & DMA_CONFIGURED))
+ continue;
+
+ p += sprintf(p, "%2d: %14s %s\n", i,
+ info->name, channel->dev_id);
+ }
+ }
+
+ return p - buf;
+}
+#endif
+
+
+int __init register_dmac(struct dma_info *info)
+{
+ int i;
+
+ INIT_LIST_HEAD(&info->list);
+
+ printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
+ info->name, info->nr_channels,
+ info->nr_channels > 1 ? "s" : "");
+
+ BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
+
+ /*
+ * Don't touch pre-configured channels
+ */
+ if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) {
+ unsigned int size;
+
+ size = sizeof(struct dma_channel) * info->nr_channels;
+
+ info->channels = kmalloc(size, GFP_KERNEL);
+ if (!info->channels)
+ return -ENOMEM;
+
+ memset(info->channels, 0, size);
+ }
+
+ for (i = 0; i < info->nr_channels; i++) {
+ struct dma_channel *chan = info->channels + i;
+
+ chan->chan = i;
+
+ memcpy(chan->dev_id, "Unused", 7);
+
+ if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
+ chan->flags |= DMA_TEI_CAPABLE;
+
+ init_MUTEX(&chan->sem);
+ init_waitqueue_head(&chan->wait_queue);
+
+#ifdef CONFIG_SYSFS
+ dma_create_sysfs_files(chan);
+#endif
+ }
+
+ list_add(&info->list, &registered_dmac_list);
+
+ return 0;
+}
+
+void __exit unregister_dmac(struct dma_info *info)
+{
+ if (!(info->flags & DMAC_CHANNELS_CONFIGURED))
+ kfree(info->channels);
+
+ list_del(&info->list);
+}
+
+static int __init dma_api_init(void)
+{
+ printk("DMA: Registering DMA API.\n");
+
+#ifdef CONFIG_PROC_FS
+ create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
+#endif
+
+ return 0;
+}
+
+subsys_initcall(dma_api_init);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("DMA API for SuperH");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(request_dma);
+EXPORT_SYMBOL(free_dma);
+EXPORT_SYMBOL(register_dmac);
+EXPORT_SYMBOL(get_dma_residue);
+EXPORT_SYMBOL(get_dma_info);
+EXPORT_SYMBOL(get_dma_channel);
+EXPORT_SYMBOL(dma_xfer);
+EXPORT_SYMBOL(dma_wait_for_completion);
+EXPORT_SYMBOL(dma_configure_channel);
+
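Taken together, these entry points form the whole consumer-facing API. As a rough
sketch (the channel number, addresses and length below are purely illustrative and
not taken from this patch, with error handling omitted), a dual-address transfer
would look like:

	int chan = 0;		/* hypothetical channel on the on-chip DMAC */

	if (request_dma(chan, "example-device") != 0)
		return -EBUSY;

	/* flags of 0 let the controller driver fall back to its defaults
	 * (the on-chip DMAC picks RS_DUAL, see dma-sh.c below) */
	dma_configure_channel(chan, 0);

	dma_xfer(chan, src_phys, dst_phys, len, 0);
	dma_wait_for_completion(chan);

	free_dma(chan);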
diff --git a/arch/sh/drivers/dma/dma-g2.c b/arch/sh/drivers/dma/dma-g2.c
new file mode 100644
index 000000000000..231e3f6fb28f
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-g2.c
@@ -0,0 +1,171 @@
+/*
+ * arch/sh/drivers/dma/dma-g2.c
+ *
+ * G2 bus DMA support
+ *
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach/sysasic.h>
+#include <asm/mach/dma.h>
+#include <asm/dma.h>
+
+struct g2_channel {
+ unsigned long g2_addr; /* G2 bus address */
+ unsigned long root_addr; /* Root bus (SH-4) address */
+ unsigned long size; /* Size (in bytes), 32-byte aligned */
+ unsigned long direction; /* Transfer direction */
+ unsigned long ctrl; /* Transfer control */
+ unsigned long chan_enable; /* Channel enable */
+ unsigned long xfer_enable; /* Transfer enable */
+ unsigned long xfer_stat; /* Transfer status */
+} __attribute__ ((aligned(32)));
+
+struct g2_status {
+ unsigned long g2_addr;
+ unsigned long root_addr;
+ unsigned long size;
+ unsigned long status;
+} __attribute__ ((aligned(16)));
+
+struct g2_dma_info {
+ struct g2_channel channel[G2_NR_DMA_CHANNELS];
+ unsigned long pad1[G2_NR_DMA_CHANNELS];
+ unsigned long wait_state;
+ unsigned long pad2[10];
+ unsigned long magic;
+ struct g2_status status[G2_NR_DMA_CHANNELS];
+} __attribute__ ((aligned(256)));
+
+static volatile struct g2_dma_info *g2_dma = (volatile struct g2_dma_info *)0xa05f7800;
+
+static irqreturn_t g2_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* FIXME: Do some meaningful completion work here.. */
+ return IRQ_HANDLED;
+}
+
+static struct irqaction g2_dma_irq = {
+ .name = "g2 DMA handler",
+ .handler = g2_dma_interrupt,
+ .flags = SA_INTERRUPT,
+};
+
+static int g2_enable_dma(struct dma_channel *chan)
+{
+ unsigned int chan_nr = chan->chan;
+
+ g2_dma->channel[chan_nr].chan_enable = 1;
+ g2_dma->channel[chan_nr].xfer_enable = 1;
+
+ return 0;
+}
+
+static int g2_disable_dma(struct dma_channel *chan)
+{
+ unsigned int chan_nr = chan->chan;
+
+ g2_dma->channel[chan_nr].chan_enable = 0;
+ g2_dma->channel[chan_nr].xfer_enable = 0;
+
+ return 0;
+}
+
+static int g2_xfer_dma(struct dma_channel *chan)
+{
+ unsigned int chan_nr = chan->chan;
+
+ if (chan->sar & 31) {
+ printk("g2dma: unaligned source 0x%lx\n", chan->sar);
+ return -EINVAL;
+ }
+
+ if (chan->dar & 31) {
+ printk("g2dma: unaligned dest 0x%lx\n", chan->dar);
+ return -EINVAL;
+ }
+
+ /* Align the count */
+ if (chan->count & 31)
+ chan->count = (chan->count + (32 - 1)) & ~(32 - 1);
+
+ /* Fixup destination */
+ chan->dar += 0xa0800000;
+
+ /* Fixup direction */
+ chan->mode = !chan->mode;
+
+ flush_icache_range((unsigned long)chan->sar, chan->count);
+
+ g2_disable_dma(chan);
+
+ g2_dma->channel[chan_nr].g2_addr = chan->dar & 0x1fffffe0;
+ g2_dma->channel[chan_nr].root_addr = chan->sar & 0x1fffffe0;
+ g2_dma->channel[chan_nr].size = (chan->count & ~31) | 0x80000000;
+ g2_dma->channel[chan_nr].direction = chan->mode;
+
+ /*
+ * bit 0 - ???
+ * bit 1 - if set, generate a hardware event on transfer completion
+ * bit 2 - ??? something to do with suspend?
+ */
+ g2_dma->channel[chan_nr].ctrl = 5; /* ?? */
+
+ g2_enable_dma(chan);
+
+ /* debug cruft */
+ pr_debug("count, sar, dar, mode, ctrl, chan, xfer: %ld, 0x%08lx, "
+ "0x%08lx, %ld, %ld, %ld, %ld\n",
+ g2_dma->channel[chan_nr].size,
+ g2_dma->channel[chan_nr].root_addr,
+ g2_dma->channel[chan_nr].g2_addr,
+ g2_dma->channel[chan_nr].direction,
+ g2_dma->channel[chan_nr].ctrl,
+ g2_dma->channel[chan_nr].chan_enable,
+ g2_dma->channel[chan_nr].xfer_enable);
+
+ return 0;
+}
+
+static struct dma_ops g2_dma_ops = {
+ .xfer = g2_xfer_dma,
+};
+
+static struct dma_info g2_dma_info = {
+ .name = "G2 DMA",
+ .nr_channels = 4,
+ .ops = &g2_dma_ops,
+ .flags = DMAC_CHANNELS_TEI_CAPABLE,
+};
+
+static int __init g2_dma_init(void)
+{
+ setup_irq(HW_EVENT_G2_DMA, &g2_dma_irq);
+
+ /* Magic */
+ g2_dma->wait_state = 27;
+ g2_dma->magic = 0x4659404f;
+
+ return register_dmac(&g2_dma_info);
+}
+
+static void __exit g2_dma_exit(void)
+{
+ free_irq(HW_EVENT_G2_DMA, 0);
+}
+
+subsys_initcall(g2_dma_init);
+module_exit(g2_dma_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("G2 bus DMA driver");
+MODULE_LICENSE("GPL");
+
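The alignment handling in g2_xfer_dma() is the usual round-up-to-boundary idiom:
source and destination must already be 32-byte aligned (anything else is rejected
with -EINVAL), while the byte count is silently padded. A quick worked example,
with an illustrative length:

	/* a 100-byte request is padded out to the next 32-byte boundary */
	count = (100 + (32 - 1)) & ~(32 - 1);	/* == 128 */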
diff --git a/arch/sh/drivers/dma/dma-isa.c b/arch/sh/drivers/dma/dma-isa.c
new file mode 100644
index 000000000000..1c9bc45b8bcb
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-isa.c
@@ -0,0 +1,106 @@
+/*
+ * arch/sh/drivers/dma/dma-isa.c
+ *
+ * Generic ISA DMA wrapper for SH DMA API
+ *
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/dma.h>
+
+/*
+ * This implements a small wrapper set to make code using the old ISA DMA API
+ * work with the SH DMA API. Since most of the work in the new API happens
+ * at ops->xfer() time, we simply use the various set_dma_xxx() routines to
+ * fill in per-channel info, and then hand this off to ops->xfer() at
+ * enable_dma() time.
+ *
+ * For channels that are doing on-demand data transfer via cascading, the
+ * channel itself will still need to be configured through the new API. As
+ * such, this code is meant for only the simplest of tasks (and shouldn't be
+ * used in any new drivers at all).
+ *
+ * It should also be noted that various functions here are labelled as
+ * being deprecated. This is due to the fact that the ops->xfer() method is
+ * the preferred way of doing things (as well as just grabbing the spinlock
+ * directly). As such, any users of this interface will be warned rather
+ * loudly.
+ */
+
+unsigned long __deprecated claim_dma_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma_spin_lock, flags);
+
+ return flags;
+}
+EXPORT_SYMBOL(claim_dma_lock);
+
+void __deprecated release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+EXPORT_SYMBOL(release_dma_lock);
+
+void __deprecated disable_dma(unsigned int chan)
+{
+ /* Nothing */
+}
+EXPORT_SYMBOL(disable_dma);
+
+void __deprecated enable_dma(unsigned int chan)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ info->ops->xfer(channel);
+}
+EXPORT_SYMBOL(enable_dma);
+
+void clear_dma_ff(unsigned int chan)
+{
+ /* Nothing */
+}
+EXPORT_SYMBOL(clear_dma_ff);
+
+void set_dma_mode(unsigned int chan, char mode)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ channel->mode = mode;
+}
+EXPORT_SYMBOL(set_dma_mode);
+
+void set_dma_addr(unsigned int chan, unsigned int addr)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ /*
+ * Single address mode is the only thing supported through
+ * this interface.
+ */
+ if ((channel->mode & DMA_MODE_MASK) == DMA_MODE_READ) {
+ channel->sar = addr;
+ } else {
+ channel->dar = addr;
+ }
+}
+EXPORT_SYMBOL(set_dma_addr);
+
+void set_dma_count(unsigned int chan, unsigned int count)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = &info->channels[chan];
+
+ channel->count = count;
+}
+EXPORT_SYMBOL(set_dma_count);
+
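For reference, code written against the old ISA API keeps working through these
wrappers with the usual call sequence. A minimal sketch, with a hypothetical
channel number and buffer (this is not taken from an in-tree user):

	unsigned long flags;

	if (request_dma(chan, "legacy-device") != 0)
		return -EBUSY;

	flags = claim_dma_lock();
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_WRITE);
	set_dma_addr(chan, virt_to_phys(buf));	/* single address only */
	set_dma_count(chan, len);
	release_dma_lock(flags);

	enable_dma(chan);	/* this is where ops->xfer() actually runs */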
diff --git a/arch/sh/drivers/dma/dma-pvr2.c b/arch/sh/drivers/dma/dma-pvr2.c
new file mode 100644
index 000000000000..2e1d58f2d1b9
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-pvr2.c
@@ -0,0 +1,109 @@
+/*
+ * arch/sh/drivers/dma/dma-pvr2.c
+ *
+ * NEC PowerVR 2 (Dreamcast) DMA support
+ *
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <asm/mach/sysasic.h>
+#include <asm/mach/dma.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+
+static unsigned int xfer_complete = 0;
+static int count = 0;
+
+static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (get_dma_residue(PVR2_CASCADE_CHAN)) {
+ printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
+ "on channel %d, waiting..\n", PVR2_CASCADE_CHAN);
+ dma_wait_for_completion(PVR2_CASCADE_CHAN);
+ }
+
+ if (count++ < 10)
+ pr_debug("Got a pvr2 dma interrupt for channel %d\n",
+ irq - HW_EVENT_PVR2_DMA);
+
+ xfer_complete = 1;
+
+ return IRQ_HANDLED;
+}
+
+static int pvr2_request_dma(struct dma_channel *chan)
+{
+ if (ctrl_inl(PVR2_DMA_MODE) != 0)
+ return -EBUSY;
+
+ ctrl_outl(0, PVR2_DMA_LMMODE0);
+
+ return 0;
+}
+
+static int pvr2_get_dma_residue(struct dma_channel *chan)
+{
+ return xfer_complete == 0;
+}
+
+static int pvr2_xfer_dma(struct dma_channel *chan)
+{
+ if (chan->sar || !chan->dar)
+ return -EINVAL;
+
+ xfer_complete = 0;
+
+ ctrl_outl(chan->dar, PVR2_DMA_ADDR);
+ ctrl_outl(chan->count, PVR2_DMA_COUNT);
+ ctrl_outl(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE);
+
+ return 0;
+}
+
+static struct irqaction pvr2_dma_irq = {
+ .name = "pvr2 DMA handler",
+ .handler = pvr2_dma_interrupt,
+ .flags = SA_INTERRUPT,
+};
+
+static struct dma_ops pvr2_dma_ops = {
+ .request = pvr2_request_dma,
+ .get_residue = pvr2_get_dma_residue,
+ .xfer = pvr2_xfer_dma,
+};
+
+static struct dma_info pvr2_dma_info = {
+ .name = "PowerVR 2 DMA",
+ .nr_channels = 1,
+ .ops = &pvr2_dma_ops,
+ .flags = DMAC_CHANNELS_TEI_CAPABLE,
+};
+
+static int __init pvr2_dma_init(void)
+{
+ setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
+ request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
+
+ return register_dmac(&pvr2_dma_info);
+}
+
+static void __exit pvr2_dma_exit(void)
+{
+ free_dma(PVR2_CASCADE_CHAN);
+ free_irq(HW_EVENT_PVR2_DMA, 0);
+}
+
+subsys_initcall(pvr2_dma_init);
+module_exit(pvr2_dma_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver");
+MODULE_LICENSE("GPL");
+
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
new file mode 100644
index 000000000000..31dacd4444b2
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -0,0 +1,267 @@
+/*
+ * arch/sh/drivers/dma/dma-sh.c
+ *
+ * SuperH On-chip DMAC Support
+ *
+ * Copyright (C) 2000 Takashi YOSHII
+ * Copyright (C) 2003, 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/signal.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include "dma-sh.h"
+
+/*
+ * The SuperH DMAC supports a number of transmit sizes; we list them here,
+ * with their respective values as they appear in the CHCR registers.
+ *
+ * Defaults to a 64-bit transfer size.
+ */
+enum {
+ XMIT_SZ_64BIT,
+ XMIT_SZ_8BIT,
+ XMIT_SZ_16BIT,
+ XMIT_SZ_32BIT,
+ XMIT_SZ_256BIT,
+};
+
+/*
+ * The DMA count is defined as the number of bytes to transfer.
+ */
+static unsigned int ts_shift[] = {
+ [XMIT_SZ_64BIT] = 3,
+ [XMIT_SZ_8BIT] = 0,
+ [XMIT_SZ_16BIT] = 1,
+ [XMIT_SZ_32BIT] = 2,
+ [XMIT_SZ_256BIT] = 5,
+};
+
+static inline unsigned int get_dmte_irq(unsigned int chan)
+{
+ unsigned int irq;
+
+ /*
+ * Normally we could just do DMTE0_IRQ + chan outright, though in the
+ * case of the 7751R, the DMTE IRQs for channels 4 and up start right
+ * above the SCIF IRQs.
+ */
+
+ if (chan < 4) {
+ irq = DMTE0_IRQ + chan;
+ } else {
+ irq = DMTE4_IRQ + chan - 4;
+ }
+
+ return irq;
+}
+
+/*
+ * We determine the correct shift size based on the CHCR transmit size
+ * for the given channel, since we know that the transfer will take:
+ *
+ * info->count >> ts_shift[transmit_size]
+ *
+ * iterations to complete.
+ */
+static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
+{
+ u32 chcr = ctrl_inl(CHCR[chan->chan]);
+
+ chcr >>= 4;
+
+ return ts_shift[chcr & 0x0007];
+}
+
+/*
+ * The transfer end interrupt must read the chcr register to end the
+ * hardware interrupt active condition.
+ * Besides that, it needs to wake any waiting process, which should handle
+ * setting up the next transfer.
+ */
+static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct dma_channel *chan = (struct dma_channel *)dev_id;
+ u32 chcr;
+
+ chcr = ctrl_inl(CHCR[chan->chan]);
+
+ if (!(chcr & CHCR_TE))
+ return IRQ_NONE;
+
+ chcr &= ~(CHCR_IE | CHCR_DE);
+ ctrl_outl(chcr, CHCR[chan->chan]);
+
+ wake_up(&chan->wait_queue);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_dmac_request_dma(struct dma_channel *chan)
+{
+ return request_irq(get_dmte_irq(chan->chan), dma_tei,
+ SA_INTERRUPT, "DMAC Transfer End", chan);
+}
+
+static void sh_dmac_free_dma(struct dma_channel *chan)
+{
+ free_irq(get_dmte_irq(chan->chan), chan);
+}
+
+static void sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
+{
+ if (!chcr)
+ chcr = RS_DUAL;
+
+ ctrl_outl(chcr, CHCR[chan->chan]);
+
+ chan->flags |= DMA_CONFIGURED;
+}
+
+static void sh_dmac_enable_dma(struct dma_channel *chan)
+{
+ int irq = get_dmte_irq(chan->chan);
+ u32 chcr;
+
+ chcr = ctrl_inl(CHCR[chan->chan]);
+ chcr |= CHCR_DE | CHCR_IE;
+ ctrl_outl(chcr, CHCR[chan->chan]);
+
+ enable_irq(irq);
+}
+
+static void sh_dmac_disable_dma(struct dma_channel *chan)
+{
+ int irq = get_dmte_irq(chan->chan);
+ u32 chcr;
+
+ disable_irq(irq);
+
+ chcr = ctrl_inl(CHCR[chan->chan]);
+ chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
+ ctrl_outl(chcr, CHCR[chan->chan]);
+}
+
+static int sh_dmac_xfer_dma(struct dma_channel *chan)
+{
+ /*
+ * If we haven't pre-configured the channel with special flags, use
+ * the defaults.
+ */
+ if (!(chan->flags & DMA_CONFIGURED))
+ sh_dmac_configure_channel(chan, 0);
+
+ sh_dmac_disable_dma(chan);
+
+ /*
+ * Single-address mode usage note!
+ *
+ * It's important that we don't accidentally write any value to SAR/DAR
+ * (this includes 0) that hasn't been directly specified by the user if
+ * we're in single-address mode.
+ *
+ * In this case, only one address can be defined, anything else will
+ * result in a DMA address error interrupt (at least on the SH-4),
+ * which will subsequently halt the transfer.
+ *
+ * Channel 2 on the Dreamcast is a special case, as this is used for
+ * cascading to the PVR2 DMAC. In this case, we still need to write
+ * SAR and DAR, regardless of value, in order for cascading to work.
+ */
+ if (chan->sar || (mach_is_dreamcast() && chan->chan == 2))
+ ctrl_outl(chan->sar, SAR[chan->chan]);
+ if (chan->dar || (mach_is_dreamcast() && chan->chan == 2))
+ ctrl_outl(chan->dar, DAR[chan->chan]);
+
+ ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);
+
+ sh_dmac_enable_dma(chan);
+
+ return 0;
+}
+
+static int sh_dmac_get_dma_residue(struct dma_channel *chan)
+{
+ if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE))
+ return 0;
+
+ return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);
+}
+
+#if defined(CONFIG_CPU_SH4)
+static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long dmaor = ctrl_inl(DMAOR);
+
+ printk("DMAE: DMAOR=%lx\n", dmaor);
+
+ ctrl_outl(ctrl_inl(DMAOR)&~DMAOR_NMIF, DMAOR);
+ ctrl_outl(ctrl_inl(DMAOR)&~DMAOR_AE, DMAOR);
+ ctrl_outl(ctrl_inl(DMAOR)|DMAOR_DME, DMAOR);
+
+ disable_irq(irq);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+static struct dma_ops sh_dmac_ops = {
+ .request = sh_dmac_request_dma,
+ .free = sh_dmac_free_dma,
+ .get_residue = sh_dmac_get_dma_residue,
+ .xfer = sh_dmac_xfer_dma,
+ .configure = sh_dmac_configure_channel,
+};
+
+static struct dma_info sh_dmac_info = {
+ .name = "SuperH DMAC",
+ .nr_channels = 4,
+ .ops = &sh_dmac_ops,
+ .flags = DMAC_CHANNELS_TEI_CAPABLE,
+};
+
+static int __init sh_dmac_init(void)
+{
+ struct dma_info *info = &sh_dmac_info;
+ int i;
+
+#ifdef CONFIG_CPU_SH4
+ make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
+ i = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT, "DMAC Address Error", 0);
+ if (i < 0)
+ return i;
+#endif
+
+ for (i = 0; i < info->nr_channels; i++) {
+ int irq = get_dmte_irq(i);
+
+ make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
+ }
+
+ ctrl_outl(0x8000 | DMAOR_DME, DMAOR);
+
+ return register_dmac(info);
+}
+
+static void __exit sh_dmac_exit(void)
+{
+#ifdef CONFIG_CPU_SH4
+ free_irq(DMAE_IRQ, 0);
+#endif
+}
+
+subsys_initcall(sh_dmac_init);
+module_exit(sh_dmac_exit);
+
+MODULE_LICENSE("GPL");
+
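The transmit-size bookkeeping above boils down to a plain shift of the byte count.
With the RS_DUAL default (which includes TS_32, the 32-bit transmit size), the
arithmetic works out as follows; the 1024-byte count is illustrative:

	u32 chcr = RS_DUAL;					/* contains TS_32 */
	unsigned int shift = ts_shift[(chcr >> 4) & 0x07];	/* yields 2 */

	/* so a 1024-byte request programs DMATCR with 1024 >> 2 = 256 units */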
diff --git a/arch/sh/drivers/dma/dma-sh.h b/arch/sh/drivers/dma/dma-sh.h
new file mode 100644
index 000000000000..dd9d547539a2
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-sh.h
@@ -0,0 +1,52 @@
+/*
+ * arch/sh/drivers/dma/dma-sh.h
+ *
+ * Copyright (C) 2000 Takashi YOSHII
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __DMA_SH_H
+#define __DMA_SH_H
+
+/* Definitions for the SuperH DMAC */
+#define REQ_L 0x00000000
+#define REQ_E 0x00080000
+#define RACK_H 0x00000000
+#define RACK_L 0x00040000
+#define ACK_R 0x00000000
+#define ACK_W 0x00020000
+#define ACK_H 0x00000000
+#define ACK_L 0x00010000
+#define DM_INC 0x00004000
+#define DM_DEC 0x00008000
+#define SM_INC 0x00001000
+#define SM_DEC 0x00002000
+#define RS_IN 0x00000200
+#define RS_OUT 0x00000300
+#define TM_BURST 0x0000080
+#define TS_8 0x00000010
+#define TS_16 0x00000020
+#define TS_32 0x00000030
+#define TS_64 0x00000000
+#define TS_BLK 0x00000040
+#define CHCR_DE 0x00000001
+#define CHCR_TE 0x00000002
+#define CHCR_IE 0x00000004
+
+/* Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32)
+
+#define DMAOR_COD 0x00000008
+#define DMAOR_AE 0x00000004
+#define DMAOR_NMIF 0x00000002
+#define DMAOR_DME 0x00000001
+
+#define MAX_DMAC_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
+
+#endif /* __DMA_SH_H */
+
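These CHCR bit definitions are what gets passed to dma_configure_channel() when
the RS_DUAL default is not wanted. A purely hypothetical composition, only meant
to show how the flags combine (not a tested peripheral setup):

	/* source increments, 16-bit transfer units, outbound request */
	dma_configure_channel(chan, SM_INC | RS_OUT | TS_16);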
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
new file mode 100644
index 000000000000..71a6d4e7809f
--- /dev/null
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/drivers/dma/dma-sysfs.c
+ *
+ * sysfs interface for SH DMA API
+ *
+ * Copyright (C) 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/module.h>
+#include <asm/dma.h>
+
+static struct sysdev_class dma_sysclass = {
+ set_kset_name("dma"),
+};
+
+EXPORT_SYMBOL(dma_sysclass);
+
+static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct dma_info *info = get_dma_info(i);
+ struct dma_channel *channel = &info->channels[i];
+
+ len += sprintf(buf + len, "%2d: %14s %s\n",
+ channel->chan, info->name,
+ channel->dev_id);
+ }
+
+ return len;
+}
+
+static SYSDEV_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
+
+static int __init dma_sysclass_init(void)
+{
+ int ret;
+
+ ret = sysdev_class_register(&dma_sysclass);
+ if (ret == 0)
+ sysfs_create_file(&dma_sysclass.kset.kobj, &attr_devices.attr);
+
+ return ret;
+}
+
+postcore_initcall(dma_sysclass_init);
+
+static ssize_t dma_show_dev_id(struct sys_device *dev, char *buf)
+{
+ struct dma_channel *channel = to_dma_channel(dev);
+ return sprintf(buf, "%s\n", channel->dev_id);
+}
+
+static ssize_t dma_store_dev_id(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ struct dma_channel *channel = to_dma_channel(dev);
+ strcpy(channel->dev_id, buf);
+ return count;
+}
+
+static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
+
+static ssize_t dma_store_config(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ struct dma_channel *channel = to_dma_channel(dev);
+ unsigned long config;
+
+ config = simple_strtoul(buf, NULL, 0);
+ dma_configure_channel(channel->chan, config);
+
+ return count;
+}
+
+static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config);
+
+static ssize_t dma_show_mode(struct sys_device *dev, char *buf)
+{
+ struct dma_channel *channel = to_dma_channel(dev);
+ return sprintf(buf, "0x%08x\n", channel->mode);
+}
+
+static ssize_t dma_store_mode(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ struct dma_channel *channel = to_dma_channel(dev);
+ channel->mode = simple_strtoul(buf, NULL, 0);
+ return count;
+}
+
+static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
+
+#define dma_ro_attr(field, fmt) \
+static ssize_t dma_show_##field(struct sys_device *dev, char *buf) \
+{ \
+ struct dma_channel *channel = to_dma_channel(dev); \
+ return sprintf(buf, fmt, channel->field); \
+} \
+static SYSDEV_ATTR(field, S_IRUGO, dma_show_##field, NULL);
+
+dma_ro_attr(count, "0x%08x\n");
+dma_ro_attr(flags, "0x%08lx\n");
+
+int __init dma_create_sysfs_files(struct dma_channel *chan)
+{
+ struct sys_device *dev = &chan->dev;
+ int ret;
+
+ dev->id = chan->chan;
+ dev->cls = &dma_sysclass;
+
+ ret = sysdev_register(dev);
+ if (ret)
+ return ret;
+
+ sysdev_create_file(dev, &attr_dev_id);
+ sysdev_create_file(dev, &attr_count);
+ sysdev_create_file(dev, &attr_mode);
+ sysdev_create_file(dev, &attr_flags);
+ sysdev_create_file(dev, &attr_config);
+
+ return 0;
+}
+
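The dma_ro_attr() helper makes it cheap to expose additional read-only channel
fields; for instance, a hypothetical attribute for the raw channel number (purely
illustrative, not part of this patch) would just be:

	dma_ro_attr(chan, "%d\n");

	/* ...plus the matching registration in dma_create_sysfs_files(): */
	sysdev_create_file(dev, &attr_chan);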