Diffstat (limited to 'include')
-rw-r--r--  include/linux/dma-buf.h  59
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 24e0f4828711..ee7ef9990d9a 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -50,6 +50,17 @@ struct dma_buf_attachment;
  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
  *		   pages.
  * @release: release this buffer; to be called after the last dma_buf_put.
+ * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
+ *		      caches and allocate backing storage (if not yet done),
+ *		      or to pin the object into memory.
+ * @end_cpu_access: [optional] called after cpu access to flush caches.
+ * @kmap_atomic: maps a page from the buffer into kernel address
+ *		 space; users may not block until the subsequent unmap call.
+ *		 This callback must not sleep.
+ * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ *		   This callback must not sleep.
+ * @kmap: maps a page from the buffer into kernel address space.
+ * @kunmap: [optional] unmaps a page from the buffer.
  */
 struct dma_buf_ops {
 	int (*attach)(struct dma_buf *, struct device *,
@@ -73,6 +84,14 @@ struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
+	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
+				enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
+			       enum dma_data_direction);
+	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
+	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
+	void *(*kmap)(struct dma_buf *, unsigned long);
+	void (*kunmap)(struct dma_buf *, unsigned long, void *);
 };
 
 /**
@@ -140,6 +159,14 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+			     enum dma_data_direction dir);
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+			    enum dma_data_direction dir);
+void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+void *dma_buf_kmap(struct dma_buf *, unsigned long);
+void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -188,6 +215,38 @@ static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	return;
 }
 
+static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					   size_t start, size_t len,
+					   enum dma_data_direction dir)
+{
+	return -ENODEV;
+}
+
+static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					  size_t start, size_t len,
+					  enum dma_data_direction dir)
+{
+}
+
+static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long pnum)
+{
+	return NULL;
+}
+
+static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
+					 unsigned long pnum, void *vaddr)
+{
+}
+
+static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
+{
+	return NULL;
+}
+
+static inline void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long pnum,
+				  void *vaddr)
+{
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
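The new begin_cpu_access/end_cpu_access and kmap*/kunmap* hooks are supplied by the exporter through struct dma_buf_ops. Below is a minimal sketch of how an exporter backed by ordinary, always-pinned system pages might wire them up; struct my_exporter, its page array, and all my_* helpers are hypothetical names for illustration only, not part of this patch, and the attach/detach/map/unmap/release ops are elided:

#include <linux/dma-buf.h>
#include <linux/highmem.h>

/* Hypothetical exporter private data: a plain array of backing pages. */
struct my_exporter {
	struct page **pages;
};

static int my_begin_cpu_access(struct dma_buf *buf, size_t start, size_t len,
			       enum dma_data_direction dir)
{
	/* Cached, always-pinned system memory: nothing to invalidate or pin. */
	return 0;
}

static void my_end_cpu_access(struct dma_buf *buf, size_t start, size_t len,
			      enum dma_data_direction dir)
{
	/* Likewise, no caches to flush for this backing storage. */
}

static void *my_kmap(struct dma_buf *buf, unsigned long pnum)
{
	struct my_exporter *exp = buf->priv;

	return kmap(exp->pages[pnum]);	/* may sleep */
}

static void my_kunmap(struct dma_buf *buf, unsigned long pnum, void *vaddr)
{
	struct my_exporter *exp = buf->priv;

	kunmap(exp->pages[pnum]);
}

static void *my_kmap_atomic(struct dma_buf *buf, unsigned long pnum)
{
	struct my_exporter *exp = buf->priv;

	return kmap_atomic(exp->pages[pnum]);	/* must not sleep until unmap */
}

static void my_kunmap_atomic(struct dma_buf *buf, unsigned long pnum,
			     void *vaddr)
{
	kunmap_atomic(vaddr);
}

static const struct dma_buf_ops my_dma_buf_ops = {
	/* .attach, .detach, .map_dma_buf, .unmap_dma_buf, .release elided */
	.begin_cpu_access	= my_begin_cpu_access,
	.end_cpu_access		= my_end_cpu_access,
	.kmap			= my_kmap,
	.kunmap			= my_kunmap,
	.kmap_atomic		= my_kmap_atomic,
	.kunmap_atomic		= my_kunmap_atomic,
};

A real exporter with uncached or unpinned storage would do actual work in begin_cpu_access/end_cpu_access (cache maintenance, pinning); the empty bodies here only hold for the assumed always-coherent case.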
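From the importer side, the dma_buf_* wrappers bracket any CPU access to the buffer. A minimal usage sketch, assuming the caller already holds a struct dma_buf reference (buf below, e.g. from an earlier dma_buf_get()) and wants to read the first byte that a device wrote into page 0; read_first_byte is a hypothetical helper, not part of this patch:

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/mm.h>	/* PAGE_SIZE */

static int read_first_byte(struct dma_buf *buf, u8 *out)
{
	void *vaddr;
	int ret;

	/* Tell the exporter the CPU is about to read [0, PAGE_SIZE). */
	ret = dma_buf_begin_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* Map page 0 of the buffer; dma_buf_kmap() may sleep. */
	vaddr = dma_buf_kmap(buf, 0);
	if (!vaddr) {
		/* Defensive: the !CONFIG_DMA_SHARED_BUFFER stub returns NULL. */
		ret = -ENOMEM;
		goto out;
	}

	*out = *(u8 *)vaddr;

	dma_buf_kunmap(buf, 0, vaddr);
out:
	/* Always pair begin/end so the exporter can undo cache/pinning work. */
	dma_buf_end_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	return ret;
}

dma_buf_kmap_atomic()/dma_buf_kunmap_atomic() follow the same pattern but, per the kerneldoc above, the caller may not sleep between the map and the unmap, so they suit short accesses from atomic context.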