Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h | 198
1 file changed, 191 insertions(+), 7 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..c61d4ca27bcc 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -40,11 +40,13 @@ typedef s32 dma_cookie_t;
* enum dma_status - DMA transaction status
* @DMA_SUCCESS: transaction completed successfully
* @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
DMA_SUCCESS,
DMA_IN_PROGRESS,
+ DMA_PAUSED,
DMA_ERROR,
};
@@ -107,6 +109,25 @@ enum dma_ctrl_flags {
};
/**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume a paused transfer
+ * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
+ * that need to reconfigure slave channels at runtime (as opposed to
+ * receiving configuration data statically from the platform). A pointer
+ * to a struct dma_slave_config must be passed in as the argument to
+ * this command.
+ */
+enum dma_ctrl_cmd {
+ DMA_TERMINATE_ALL,
+ DMA_PAUSE,
+ DMA_RESUME,
+ DMA_SLAVE_CONFIG,
+};
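
To make the usage concrete — a hedged sketch, not part of the patch; the function name and error handling are invented — a client that owns a channel could drive these commands through the device_control hook introduced further down:

/* Hypothetical sketch: pause a channel and, if the transfer turns
 * out to be unwanted, terminate it. Assumes "chan" is a channel the
 * client already holds; a controller may return an error for
 * commands it does not implement.
 */
static int example_pause_then_kill(struct dma_chan *chan)
{
	int ret;

	ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return ret;

	/* ... inspect progress, decide the transfer is unwanted ... */

	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}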
+
+/**
* enum sum_check_bits - bit position of pq_check_flags
*/
enum sum_check_bits {
@@ -184,6 +205,71 @@ struct dma_chan_dev {
atomic_t *idr_ref;
};
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
+ * legal values; DMA_BIDIRECTIONAL is not acceptable since we
+ * need to differentiate source and target addresses.
+ * @src_addr: the physical address where DMA slave data
+ * should be read from (RX). If the source is memory this argument
+ * is ignored.
+ * @dst_addr: the physical address where DMA slave data
+ * should be written to (TX). If the destination is memory this
+ * argument is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 4, 8.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * command in the channel config interface, DMA_SLAVE_CONFIG
+ * and this struct will then be passed in as an argument to the
+ * DMA engine device_control() function.
+ *
+ * The rationale for adding configuration information to this struct
+ * is as follows: if it is likely that most DMA slave controllers in
+ * the world will support the configuration option, then make it
+ * generic. If not: if the option is fixed, so that it can be supplied
+ * statically from the platform data, then prefer to do that. Else,
+ * if it is neither fixed nor generic enough (such as bus mastership
+ * on some CPU family and whatnot), then create a custom slave config
+ * struct, pass that in instead, and make this generic config a
+ * member of that struct, if applicable.
+ */
+struct dma_slave_config {
+ enum dma_data_direction direction;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+};
+
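The call flow the comment above describes might look as follows — a hedged sketch, not from the patch; example_setup_rx(), the FIFO address parameter and the burst size are invented placeholders:

/* Hypothetical sketch: runtime-configure a slave channel for
 * peripheral-to-memory (RX) transfers and hand the config to the
 * controller via DMA_SLAVE_CONFIG.
 */
static int example_setup_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,	/* peripheral data register */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,		/* e.g. half a 16-word FIFO */
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}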
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
@@ -230,9 +316,84 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+ spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+ spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ txd->next = next;
+ next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+ txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+ txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return txd->next;
+}
+#endif
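
For context, these accessors let core code chain dependent descriptors without touching the (possibly compiled-out) fields directly. A minimal sketch of such a caller, assuming both descriptors are live — example_chain() is an invented name:

/* Hypothetical sketch: queue "tx" behind an earlier descriptor so it
 * runs once the dependency completes. Note that with
 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH set, txd_chain() is a BUG(),
 * as no dependency chains should exist in that configuration.
 */
static void example_chain(struct dma_async_tx_descriptor *depend_tx,
			  struct dma_async_tx_descriptor *tx)
{
	txd_lock(depend_tx);
	if (!txd_next(depend_tx))
		txd_chain(depend_tx, tx);
	txd_unlock(depend_tx);
}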
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the number of bytes remaining to be transferred
+ * on the selected transfer for the states DMA_IN_PROGRESS and
+ * DMA_PAUSED, if implemented by the driver; otherwise 0
+ */
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
};
/**
@@ -261,8 +422,12 @@ struct dma_async_tx_descriptor {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel;
+ * returns zero or an error code
+ * @device_tx_status: poll for transaction completion; the optional
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the
+ * call will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
*/
struct dma_device {
@@ -313,11 +478,12 @@ struct dma_device {
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
- void (*device_terminate_all)(struct dma_chan *chan);
+ int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg);
- enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *last,
- dma_cookie_t *used);
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
};
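
On the driver side, device_control becomes a single dispatch point for all four commands. A hedged sketch of a minimal implementation — the foo_* names are invented stand-ins for hardware-specific helpers:

/* Hypothetical sketch: a driver's device_control dispatch. */
static int foo_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return foo_terminate_all(chan);
	case DMA_PAUSE:
		return foo_pause(chan);
	case DMA_RESUME:
		return foo_resume(chan);
	case DMA_SLAVE_CONFIG:
		return foo_slave_config(chan,
				(struct dma_slave_config *)arg);
	default:
		return -ENXIO;	/* unknown or unsupported command */
	}
}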
@@ -558,7 +724,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
- return chan->device->device_is_tx_complete(chan, cookie, last, used);
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
}
#define dma_async_memcpy_complete(chan, cookie, last, used)\
@@ -586,6 +760,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
return DMA_IN_PROGRESS;
}
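
A caller-side sketch of the polling helpers above — hedged, with an invented function name; real users should block on a completion callback rather than spin:

/* Hypothetical sketch: busy-poll a cookie until the channel reports
 * something other than DMA_IN_PROGRESS.
 */
static enum dma_status example_poll(struct dma_chan *chan,
				    dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status;	/* DMA_SUCCESS, DMA_PAUSED or DMA_ERROR */
}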
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+ if (st) {
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+ }
+}
+
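dma_set_tx_state() is the helper a driver's device_tx_status implementation uses to fill the optional txstate. A hedged sketch — struct foo_chan, to_foo_chan(), foo_residue() and the cookie bookkeeping are invented driver-private details:

/* Hypothetical sketch: driver-side device_tx_status built on
 * dma_set_tx_state().
 */
struct foo_chan {
	struct dma_chan chan;
	dma_cookie_t completed_cookie;	/* last cookie retired in the IRQ */
	dma_cookie_t issued_cookie;	/* last cookie handed out */
};
#define to_foo_chan(c) container_of(c, struct foo_chan, chan)

static u32 foo_residue(struct foo_chan *fc)
{
	return 0;	/* a real driver would read a hw byte counter */
}

static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct foo_chan *fc = to_foo_chan(chan);
	enum dma_status ret;

	ret = dma_async_is_complete(cookie, fc->completed_cookie,
				    fc->issued_cookie);
	/* residue is only meaningful while the transfer is outstanding */
	dma_set_tx_state(txstate, fc->completed_cookie, fc->issued_cookie,
			 ret == DMA_SUCCESS ? 0 : foo_residue(fc));
	return ret;
}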
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);