author     Dave Airlie <airlied@redhat.com>          2011-12-21 11:23:44 +0000
committer  Dave Airlie <airlied@redhat.com>          2012-05-23 10:46:05 +0100
commit     96503f592fd729f296f5870a57be0417eeffc92a
tree       f83af0f69cd4ca5023d44c7706ee4459918499b2  /drivers
parent     0ff926c7d4f06f9703226dc09acad17e86f169d6
udl: add prime fd->handle support.
udl can only be used as an output offload, so it doesn't need to support
the handle->fd direction.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
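
For context, the fd->handle direction that udl keeps is driven from userspace through DRM_IOCTL_PRIME_FD_TO_HANDLE; drm_gem_prime_fd_to_handle() then routes the import into the driver's .gem_prime_import hook, i.e. udl_gem_prime_import() below. A minimal, hypothetical userspace sketch, assuming libdrm headers, an already-open udl card node and a dma-buf fd exported by some other device (none of these names come from the patch):

/* Hypothetical sketch, not part of this patch: import a dma-buf fd into
 * udl as a GEM handle. udl_fd is assumed to be an open /dev/dri/cardN node
 * bound to udl; dmabuf_fd is assumed to come from the exporting device. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <drm.h>        /* struct drm_prime_handle, DRM_IOCTL_PRIME_FD_TO_HANDLE */
#include <xf86drm.h>    /* libdrm: drmIoctl() */

static int udl_import_dmabuf(int udl_fd, int dmabuf_fd, uint32_t *handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.fd = dmabuf_fd;

	/* Lands in drm_gem_prime_fd_to_handle(), which calls
	 * udl_gem_prime_import() via the new .gem_prime_import hook. */
	if (drmIoctl(udl_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
		return -errno;

	*handle = args.handle;
	return 0;
}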
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c |  6
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h |  3
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  |  9
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 75
4 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 82e6921e6865..4d02c46a9420 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -57,7 +57,7 @@ static const struct file_operations udl_driver_fops = {
 };
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 	.load = udl_driver_load,
 	.unload = udl_driver_unload,
 
@@ -70,6 +70,10 @@ static struct drm_driver driver = {
 	.dumb_map_offset = udl_gem_mmap,
 	.dumb_destroy = udl_dumb_destroy,
 	.fops = &udl_driver_fops,
+
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = udl_gem_prime_import,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 96820d03a303..fccd361f7b50 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -66,6 +66,7 @@ struct udl_gem_object {
 	struct drm_gem_object base;
 	struct page **pages;
 	void *vmapping;
+	struct sg_table *sg;
 };
 
 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
 
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4d9c3a5d8a45..a029ee39b0c5 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -593,11 +593,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	struct udl_framebuffer *ufb;
 	int ret;
+	uint32_t size;
 
 	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (size > obj->size) {
+		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
 	if (ufb == NULL)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 92f19ef329b0..40efd32f7dce 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -9,6 +9,7 @@
 #include "drmP.h"
 #include "udl_drv.h"
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
+	if (obj->base.import_attach) {
+		drm_free_large(obj->pages);
+		obj->pages = NULL;
+		return;
+	}
+
 	for (i = 0; i < page_count; i++)
 		page_cache_release(obj->pages[i]);
 
@@ -195,6 +202,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
@@ -239,3 +249,68 @@ unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
+
+static int udl_prime_create(struct drm_device *dev,
+			    size_t size,
+			    struct sg_table *sg,
+			    struct udl_gem_object **obj_p)
+{
+	struct udl_gem_object *obj;
+	int npages;
+	int i;
+	struct scatterlist *iter;
+
+	npages = size / PAGE_SIZE;
+
+	*obj_p = NULL;
+	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->sg = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (obj->pages == NULL) {
+		DRM_ERROR("obj pages is NULL %d\n", npages);
+		return -ENOMEM;
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+	*obj_p = obj;
+	return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct udl_gem_object *uobj;
+	int ret;
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_PTR(PTR_ERR(attach));
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+	if (ret) {
+		goto fail_unmap;
+	}
+
+	uobj->base.import_attach = attach;
+
+	return &uobj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
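
Once the handle exists on the udl side, the usual KMS path can wrap it in a framebuffer, which is what the new size check in udl_fb_user_fb_create() guards: a page-aligned pitches[0] * height larger than the imported object now fails instead of scanning out past the end of the buffer. A rough, hypothetical continuation of the earlier sketch (the helper name and the width/height/pitch parameters are made up; drmModeAddFB2() is the libdrm wrapper for DRM_IOCTL_MODE_ADDFB2):

#include <stdint.h>
#include <drm_fourcc.h>    /* DRM_FORMAT_XRGB8888 */
#include <xf86drmMode.h>   /* libdrm: drmModeAddFB2() */

/* Hypothetical follow-on to the import sketch: create a KMS framebuffer on
 * top of the imported handle. udl_fb_user_fb_create() rejects the request
 * with -ENOMEM if ALIGN(pitch * height, PAGE_SIZE) exceeds the size of the
 * imported object. */
static int udl_fb_from_handle(int udl_fd, uint32_t handle,
			      uint32_t width, uint32_t height,
			      uint32_t pitch, uint32_t *fb_id)
{
	uint32_t handles[4] = { handle, 0, 0, 0 };
	uint32_t pitches[4] = { pitch, 0, 0, 0 };
	uint32_t offsets[4] = { 0, 0, 0, 0 };

	return drmModeAddFB2(udl_fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}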