author     Dave Airlie <airlied@redhat.com>    2017-08-17 07:33:41 +1000
committer  Dave Airlie <airlied@redhat.com>    2017-08-17 07:33:41 +1000
commit     3154b133711f70bb50f513773947a8a647d24310 (patch)
tree       be1284614bc52cec292ebc150d9983ca08af34cc
parent     efa479352fc780b305fa186cafb5f416fdf2b2cb (diff)
parent     d956e1293b9b43f3a9a508162cdbaa96cf02e6e0 (diff)
Merge tag 'drm-misc-next-2017-08-16' of git://anongit.freedesktop.org/git/drm-misc into drm-next
UAPI Changes:
- vc4: Allow userspace to dictate rendering order in submit_cl ioctl (Eric)
Cross-subsystem Changes:
- vboxvideo: One of Cihangir's patches applies to vboxvideo which is maintained
in staging
Core Changes:
- atomic_legacy_backoff is officially killed (Daniel)
- Extract drm_device.h (Daniel)
- Unregister drm device on unplug (Daniel)
- Rename deprecated drm_*_(un)?reference functions to drm_*_{get|put} (Cihangir)
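The unplug rework above replaces drm_unplug_dev() with drm_dev_unplug(), and entry points now check drm_dev_is_unplugged() instead of drm_device_is_unplugged() (see the drm_drv.c and udl hunks in the diff below). A minimal sketch of the pattern for a hotpluggable driver; the foo_* names are placeholders, only the drm_dev_* calls come from this series:

#include <linux/usb.h>
#include <drm/drmP.h>

static void foo_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	/* Unregisters the device while userspace may still hold it open. */
	drm_dev_unplug(dev);
}

static int foo_entry_point(struct drm_device *dev)
{
	/* Every entry point bails out once the hardware is gone. */
	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	return 0;
}
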
Driver Changes:
- vc4: Error/destroy path cleanups, log level demotion, edid leak (Eric)
- various: Make various drm_*_funcs structs const (Bhumika)
- tinydrm: add support for LEGO MINDSTORMS EV3 LCD (David)
- various: Second half of .dumb_{map_offset|destroy} defaults set (Noralf)
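With the second half of the .dumb_{map_offset|destroy} default series, a GEM driver with no special dumb-buffer needs can leave both hooks unset and the core falls back to drm_gem_dumb_map_offset() and drm_gem_dumb_destroy(), which is what the per-driver patches below do. A hedged sketch; the foo_* symbols are placeholders:

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.fops			= &foo_fops,
	.dumb_create		= foo_dumb_create,
	/*
	 * No .dumb_map_offset or .dumb_destroy: the core defaults to
	 * drm_gem_dumb_map_offset() and drm_gem_dumb_destroy().
	 */
	.name			= "foo",
	.desc			= "placeholder driver",
};
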
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Eric Anholt <eric@anholt.net>
Cc: Bhumika Goyal <bhumirks@gmail.com>
Cc: Cihangir Akturk <cakturk@gmail.com>
Cc: David Lechner <david@lechnology.com>
Cc: Noralf Trønnes <noralf@tronnes.org>
* tag 'drm-misc-next-2017-08-16' of git://anongit.freedesktop.org/git/drm-misc: (50 commits)
drm/gem-cma-helper: Remove drm_gem_cma_dumb_map_offset()
drm/virtio: Use the drm_driver.dumb_destroy default
drm/bochs: Use the drm_driver.dumb_destroy default
drm/mgag200: Use the drm_driver.dumb_destroy default
drm/exynos: Use .dumb_map_offset and .dumb_destroy defaults
drm/msm: Use the drm_driver.dumb_destroy default
drm/ast: Use the drm_driver.dumb_destroy default
drm/qxl: Use the drm_driver.dumb_destroy default
drm/udl: Use the drm_driver.dumb_destroy default
drm/cirrus: Use the drm_driver.dumb_destroy default
drm/tegra: Use .dumb_map_offset and .dumb_destroy defaults
drm/gma500: Use .dumb_map_offset and .dumb_destroy defaults
drm/mxsfb: Use .dumb_map_offset and .dumb_destroy defaults
drm/meson: Use .dumb_map_offset and .dumb_destroy defaults
drm/kirin: Use .dumb_map_offset and .dumb_destroy defaults
drm/vc4: Continue the switch to drm_*_put() helpers
drm/vc4: Fix leak of HDMI EDID
dma-buf: fix reservation_object_wait_timeout_rcu to wait correctly v2
dma-buf: add reservation_object_copy_fences (v2)
drm/tinydrm: add support for LEGO MINDSTORMS EV3 LCD
...
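The "fix reservation_object_wait_timeout_rcu to wait correctly" change listed above reworks the wait loop so the exclusive fence is always checked first; the calling convention is unchanged. A hedged usage sketch (foo_wait_idle() and the one-second timeout are made up for illustration):

#include <linux/jiffies.h>
#include <linux/reservation.h>

static int foo_wait_idle(struct reservation_object *resv)
{
	long ret;

	/* wait_all=true waits on shared and exclusive fences, interruptibly. */
	ret = reservation_object_wait_timeout_rcu(resv, true, true,
						  msecs_to_jiffies(1000));
	if (ret == 0)
		return -EBUSY;	/* timed out */
	if (ret < 0)
		return ret;	/* interrupted */

	return 0;
}
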
88 files changed, 1059 insertions, 628 deletions
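The dma-buf part of this pull also adds reservation_object_copy_fences(), which duplicates a reservation object's shared-fence list and takes a reference on its exclusive fence. A hedged sketch of a caller, assuming the reservation_object_lock()/unlock() helpers; a real user would take both locks through a ww_acquire_ctx, which is elided here:

#include <linux/reservation.h>

static int foo_copy_resv(struct reservation_object *dst,
			 struct reservation_object *src)
{
	int ret;

	/* The new kerneldoc requires both src and dst locks to be held. */
	reservation_object_lock(dst, NULL);
	reservation_object_lock(src, NULL);

	/* Can fail with -ENOMEM while duplicating the shared-fence list. */
	ret = reservation_object_copy_fences(dst, src);

	reservation_object_unlock(src);
	reservation_object_unlock(dst);

	return ret;
}
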
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7586.txt b/Documentation/devicetree/bindings/display/sitronix,st7586.txt new file mode 100644 index 000000000000..1d0dad1210d3 --- /dev/null +++ b/Documentation/devicetree/bindings/display/sitronix,st7586.txt @@ -0,0 +1,22 @@ +Sitronix ST7586 display panel + +Required properties: +- compatible: "lego,ev3-lcd". +- a0-gpios: The A0 signal (since this binding is for serial mode, this is + the pin labeled D1 on the controller, not the pin labeled A0) +- reset-gpios: Reset pin + +The node for this driver must be a child node of a SPI controller, hence +all mandatory properties described in ../spi/spi-bus.txt must be specified. + +Optional properties: +- rotation: panel rotation in degrees counter clockwise (0,90,180,270) + +Example: + display@0{ + compatible = "lego,ev3-lcd"; + reg = <0>; + spi-max-frequency = <10000000>; + a0-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index a6c2306d3842..fdba5f624480 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4380,6 +4380,12 @@ S: Orphan / Obsolete F: drivers/gpu/drm/sis/ F: include/uapi/drm/sis_drm.h +DRM DRIVER FOR SITRONIX ST7586 PANELS +M: David Lechner <david@lechnology.com> +S: Maintained +F: drivers/gpu/drm/tinydrm/st7586.c +F: Documentation/devicetree/bindings/display/st7586.txt + DRM DRIVER FOR TDFX VIDEO CARDS S: Orphan / Obsolete F: drivers/gpu/drm/tdfx/ diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 393817e849ed..dec3a815455d 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -195,8 +195,7 @@ done: if (old) kfree_rcu(old, rcu); - if (old_fence) - dma_fence_put(old_fence); + dma_fence_put(old_fence); } /** @@ -258,12 +257,71 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, dma_fence_put(rcu_dereference_protected(old->shared[i], reservation_object_held(obj))); - if (old_fence) - dma_fence_put(old_fence); + dma_fence_put(old_fence); } EXPORT_SYMBOL(reservation_object_add_excl_fence); /** +* reservation_object_copy_fences - Copy all fences from src to dst. +* @dst: the destination reservation object +* @src: the source reservation object +* +* Copy all fences from src to dst. Both src->lock as well as dst-lock must be +* held. 
+*/ +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src) +{ + struct reservation_object_list *src_list, *dst_list; + struct dma_fence *old, *new; + size_t size; + unsigned i; + + src_list = reservation_object_get_list(src); + + if (src_list) { + size = offsetof(typeof(*src_list), + shared[src_list->shared_count]); + dst_list = kmalloc(size, GFP_KERNEL); + if (!dst_list) + return -ENOMEM; + + dst_list->shared_count = src_list->shared_count; + dst_list->shared_max = src_list->shared_count; + for (i = 0; i < src_list->shared_count; ++i) + dst_list->shared[i] = + dma_fence_get(src_list->shared[i]); + } else { + dst_list = NULL; + } + + kfree(dst->staged); + dst->staged = NULL; + + src_list = reservation_object_get_list(dst); + + old = reservation_object_get_excl(dst); + new = reservation_object_get_excl(src); + + dma_fence_get(new); + + preempt_disable(); + write_seqcount_begin(&dst->seq); + /* write_seqcount_begin provides the necessary memory barrier */ + RCU_INIT_POINTER(dst->fence_excl, new); + RCU_INIT_POINTER(dst->fence, dst_list); + write_seqcount_end(&dst->seq); + preempt_enable(); + + if (src_list) + kfree_rcu(src_list, rcu); + dma_fence_put(old); + + return 0; +} +EXPORT_SYMBOL(reservation_object_copy_fences); + +/** * reservation_object_get_fences_rcu - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object @@ -373,12 +431,25 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, long ret = timeout ? timeout : 1; retry: - fence = NULL; shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); - if (wait_all) { + fence = rcu_dereference(obj->fence_excl); + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + if (!dma_fence_get_rcu(fence)) + goto unlock_retry; + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + fence = NULL; + } + + } else { + fence = NULL; + } + + if (!fence && wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); @@ -405,22 +476,6 @@ retry: } } - if (!shared_count) { - struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); - - if (fence_excl && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence_excl->flags)) { - if (!dma_fence_get_rcu(fence_excl)) - goto unlock_retry; - - if (dma_fence_is_signaled(fence_excl)) - dma_fence_put(fence_excl); - else - fence = fence_excl; - } - } - rcu_read_unlock(); if (fence) { if (read_seqcount_retry(&obj->seq, seq)) { diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index e3c13aa202b8..289eda54e5aa 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(arcpgu->fbdev); } -static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { +static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = arcpgu_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 3022b39c00f3..69dab82a3771 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -209,7 +209,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; diff --git 
a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 9052ebeae8d0..0cd827e11fa2 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -266,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&afbdev->helper); if (afb->obj) { - drm_gem_object_unreference_unlocked(afb->obj); + drm_gem_object_put_unlocked(afb->obj); afb->obj = NULL; } drm_fb_helper_fini(&afbdev->helper); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 9a44cdec3bca..dac355812adc 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -387,7 +387,7 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); - drm_gem_object_unreference_unlocked(ast_fb->obj); + drm_gem_object_put_unlocked(ast_fb->obj); drm_framebuffer_cleanup(fb); kfree(ast_fb); } @@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev, ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL); if (!ast_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(ast_fb); return ERR_PTR(ret); } @@ -628,7 +628,7 @@ int ast_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -676,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file, bo = gem_to_ast_bo(obj); *offset = ast_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 43245229f437..6f3849ec0c1d 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -950,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; ttm_bo_kunmap(&ast->cache_kmap); - drm_gem_object_unreference_unlocked(ast->cursor_cache); + drm_gem_object_put_unlocked(ast->cursor_cache); } int ast_mode_init(struct drm_device *dev) @@ -1215,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc, ast_show_cursor(crtc); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index a1d28845da5f..7b20318483e4 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -93,7 +93,6 @@ static struct drm_driver bochs_driver = { .gem_free_object_unlocked = bochs_gem_free_object, .dumb_create = bochs_dumb_create, .dumb_map_offset = bochs_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; /* ---------------------------------------------------------------------- */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 682c090fa3ed..b2431aee7887 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -785,7 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force) return adv7511_detect(adv, connector); } -static struct drm_connector_funcs adv7511_connector_funcs = { +static const 
struct drm_connector_funcs adv7511_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .detect = adv7511_connector_detect, .destroy = drm_connector_cleanup, @@ -856,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) return ret; } -static struct drm_bridge_funcs adv7511_bridge_funcs = { +static const struct drm_bridge_funcs adv7511_bridge_funcs = { .enable = adv7511_bridge_enable, .disable = adv7511_bridge_disable, .mode_set = adv7511_bridge_mode_set, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 8f2d1379c880..cf3f0caf9c63 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream) return bytes_to_frames(runtime, dw->buf_offset); } -static struct snd_pcm_ops snd_dw_hdmi_ops = { +static const struct snd_pcm_ops snd_dw_hdmi_ops = { .open = dw_hdmi_open, .close = dw_hdmi_close, .ioctl = snd_pcm_lib_ioctl, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 36f5ccbd1794..63c7a01b7053 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -811,7 +811,7 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge) return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge); } -static struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { +static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { .mode_set = dw_mipi_dsi_bridge_mode_set, .enable = dw_mipi_dsi_bridge_enable, .post_disable = dw_mipi_dsi_bridge_post_disable, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index 910c300f5c37..69c4e352dd78 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -142,7 +142,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, .dumb_map_offset = cirrus_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; static const struct dev_pm_ops cirrus_pm_ops = { diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 0f6815f35ad2..32fbfba2c623 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -251,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&gfbdev->helper); if (gfb->obj) { - drm_gem_object_unreference_unlocked(gfb->obj); + drm_gem_object_put_unlocked(gfb->obj); gfb->obj = NULL; } diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index e7fc95f63dca..b5f528543956 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb); - drm_gem_object_unreference_unlocked(cirrus_fb->obj); + drm_gem_object_put_unlocked(cirrus_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } @@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev, cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL); if (!cirrus_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj); if 
(ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(cirrus_fb); return ERR_PTR(ret); } @@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file, bo = gem_to_cirrus_bo(obj); *offset = cirrus_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 2ed2d919beae..be38ac7050d4 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -291,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id) if (!minor) { return ERR_PTR(-ENODEV); - } else if (drm_device_is_unplugged(minor->dev)) { + } else if (drm_dev_is_unplugged(minor->dev)) { drm_dev_unref(minor->dev); return ERR_PTR(-ENODEV); } @@ -364,26 +364,32 @@ void drm_put_dev(struct drm_device *dev) } EXPORT_SYMBOL(drm_put_dev); -void drm_unplug_dev(struct drm_device *dev) +static void drm_device_set_unplugged(struct drm_device *dev) { - /* for a USB device */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_modeset_unregister_all(dev); + smp_wmb(); + atomic_set(&dev->unplugged, 1); +} - drm_minor_unregister(dev, DRM_MINOR_PRIMARY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); +/** + * drm_dev_unplug - unplug a DRM device + * @dev: DRM device + * + * This unplugs a hotpluggable DRM device, which makes it inaccessible to + * userspace operations. Entry-points can use drm_dev_is_unplugged(). This + * essentially unregisters the device like drm_dev_unregister(), but can be + * called while there are still open users of @dev. + */ +void drm_dev_unplug(struct drm_device *dev) +{ + drm_dev_unregister(dev); mutex_lock(&drm_global_mutex); - drm_device_set_unplugged(dev); - - if (dev->open_count == 0) { - drm_put_dev(dev); - } + if (dev->open_count == 0) + drm_dev_unref(dev); mutex_unlock(&drm_global_mutex); } -EXPORT_SYMBOL(drm_unplug_dev); +EXPORT_SYMBOL(drm_dev_unplug); /* * DRM internal mount @@ -835,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register); * drm_dev_register() but does not deallocate the device. The caller must call * drm_dev_unref() to drop their final reference. * + * A special form of unregistering for hotpluggable devices is drm_dev_unplug(), + * which can be called while there are still open users of @dev. + * * This should be called first in the device teardown code to make sure * userspace can't access the device instance any more. 
*/ @@ -842,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev) { struct drm_map_list *r_list, *list_temp; - drm_lastclose(dev); + if (drm_core_check_feature(dev, DRIVER_LEGACY)) + drm_lastclose(dev); dev->registered = false; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 59b75a974357..b3c6e997ccdb 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -436,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp) if (!--dev->open_count) { drm_lastclose(dev); - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) drm_put_dev(dev); } mutex_unlock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a8d396bed6a4..ad4e9cfe48a2 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1001,7 +1001,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_vma_offset_node *node; int ret; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; drm_vma_offset_lock_lookup(dev->vma_offset_manager); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 275ab872b34f..373e33f22be4 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, } EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); -/** - * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM - * object - * @file_priv: DRM file-private structure containing the GEM object - * @drm: DRM device - * @handle: GEM object handle - * @offset: return location for the fake mmap offset - * - * This function look up an object by its handle and returns the fake mmap - * offset associated with it. Drivers using the CMA helpers should set this - * as their &drm_driver.dumb_map_offset callback. - * - * Returns: - * 0 on success or a negative error code on failure. 
- */ -int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, u32 handle, - u64 *offset) -{ - struct drm_gem_object *gem_obj; - - gem_obj = drm_gem_object_lookup(file_priv, handle); - if (!gem_obj) { - dev_err(drm->dev, "failed to lookup GEM object\n"); - return -EINVAL; - } - - *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - - drm_gem_object_put_unlocked(gem_obj); - - return 0; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset); - const struct vm_operations_struct drm_gem_cma_vm_ops = { .open = drm_gem_vm_open, .close = drm_gem_vm_close, @@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, struct drm_device *dev = priv->minor->dev; struct drm_vma_offset_node *node; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; drm_vma_offset_lock_lookup(dev->vma_offset_manager); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 8bfeb32f8a10..d920b2118a39 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -716,7 +716,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, struct drm_device *dev = file_priv->minor->dev; int retcode; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; retcode = drm_ioctl_permit(flags, file_priv); @@ -765,7 +765,7 @@ long drm_ioctl(struct file *filp, dev = file_priv->minor->dev; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 5c14beee52ff..85ab1eec73e5 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -126,7 +126,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane plane->format_types[j], plane->modifiers[i])) { - mod->formats |= 1 << j; + mod->formats |= 1ULL << j; } } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 1170b3209a12..13a59ed2afbc 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -631,7 +631,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_device *dev = priv->minor->dev; int ret; - if (drm_device_is_unplugged(dev)) + if (drm_dev_is_unplugged(dev)) return -ENODEV; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index facc8419f0cd..b1f7299600f0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = { .gem_free_object_unlocked = exynos_drm_gem_free_object, .gem_vm_ops = &exynos_drm_gem_vm_ops, .dumb_create = exynos_drm_gem_dumb_create, - .dumb_map_offset = exynos_drm_gem_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index c23479be4850..077de014d610 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data, { struct drm_exynos_gem_map *args = data; - return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, - &args->offset); + return drm_gem_dumb_map_offset(file_priv, dev, 
args->handle, + &args->offset); } dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, @@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, return 0; } -int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret = 0; - - /* - * get offset of memory allocated for drm framebuffer. - * - this callback would be called by user application - * with DRM_IOCTL_MODE_MAP_DUMB command. - */ - - obj = drm_gem_object_lookup(file_priv, handle); - if (!obj) { - DRM_ERROR("failed to lookup gem object.\n"); - return -EINVAL; - } - - *offset = drm_vma_node_offset_addr(&obj->vma_node); - DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); - - drm_gem_object_unreference_unlocked(obj); - return ret; -} - int exynos_drm_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 85457255fcd1..e86d1a9518c3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -/* map memory region for drm framebuffer to user space. */ -int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); - /* page fault handler and mmap fault address(virtual) to physical memory. */ int exynos_drm_gem_fault(struct vm_fault *vmf); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 7da061aab729..131239759a75 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data, } /** - * psb_gem_dumb_map_gtt - buffer mapping for dumb interface - * @file: our drm client file - * @dev: drm device - * @handle: GEM handle to the object (from dumb_create) - * - * Do the necessary setup to allow the mapping of the frame buffer - * into user memory. We don't have to do much here at the moment. 
- */ -int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - int ret = 0; - struct drm_gem_object *obj; - - /* GEM does all our handle to object mapping */ - obj = drm_gem_object_lookup(file, handle); - if (obj == NULL) - return -ENOENT; - - /* Make it mmapable */ - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); -out: - drm_gem_object_unreference_unlocked(obj); - return ret; -} - -/** * psb_gem_create - create a mappable object * @file: the DRM file of the client * @dev: our device diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 747c06b227c5..37a3be71acd9 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -494,8 +494,6 @@ static struct drm_driver driver = { .gem_vm_ops = &psb_gem_vm_ops, .dumb_create = psb_gem_dumb_create, - .dumb_map_offset = psb_gem_dumb_map_gtt, - .dumb_destroy = drm_gem_dumb_destroy, .ioctls = psb_ioctls, .fops = &psb_gem_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 83667087d6e5..821497dbd3fc 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data, struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset); extern int psb_gem_fault(struct vm_fault *vmf); /* psb_device.c */ diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index 9740eed9231a..b92595c477ef 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -157,7 +157,7 @@ out_unpin_bo: out_unreserve_ttm_bo: ttm_bo_unreserve(&bo->bo); out_unref_gem: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return ret; } @@ -172,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev) drm_fb_helper_fini(fbh); if (gfb) - drm_framebuffer_unreference(&gfb->fb); + drm_framebuffer_put(&gfb->fb); } static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index ac457c779caa..3518167a7dc4 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, } ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) { DRM_ERROR("failed to unreference GEM object: %d\n", ret); return ret; @@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, bo = gem_to_hibmc_bo(obj); *offset = hibmc_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } @@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb); - drm_gem_object_unreference_unlocked(hibmc_fb->obj); + drm_gem_object_put_unlocked(hibmc_fb->obj); drm_framebuffer_cleanup(fb); kfree(hibmc_fb); } @@ -543,7 +543,7 @@ 
hibmc_user_framebuffer_create(struct drm_device *dev, hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj); if (IS_ERR(hibmc_fb)) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR((long)hibmc_fb); } return &hibmc_fb->fb; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 79fcce76f2ad..e27352ca26c4 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -157,8 +157,6 @@ static struct drm_driver kirin_drm_driver = { .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = kirin_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c index d4246c9dceae..0d8d506695f9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c @@ -58,7 +58,7 @@ static void mtk_drm_fb_destroy(struct drm_framebuffer *fb) drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(mtk_fb->gem_obj); + drm_gem_object_put_unlocked(mtk_fb->gem_obj); kfree(mtk_fb); } @@ -160,6 +160,6 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev, return &mtk_fb->base; unreference: - drm_gem_object_unreference_unlocked(gem); + drm_gem_object_put_unlocked(gem); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 8ec963fff8b1..f595ac816b55 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -122,7 +122,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, goto err_handle_create; /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_unreference_unlocked(&mtk_gem->base); + drm_gem_object_put_unlocked(&mtk_gem->base); return 0; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 5375e6dccdd7..7742c7d81ed8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -116,8 +116,6 @@ static struct drm_driver meson_driver = { /* GEM Ops */ .dumb_create = drm_gem_cma_dumb_create, - .dumb_destroy = drm_gem_dumb_destroy, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 2ac3fcbfea7b..968e20379d54 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -248,7 +248,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, out_unreserve1: mgag200_bo_unreserve(pixels_2); out_unref: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 4189160af726..74cdde2ee474 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -102,7 +102,6 @@ static struct drm_driver driver = { .gem_free_object_unlocked = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, - .dumb_destroy = drm_gem_dumb_destroy, }; static struct pci_driver mgag200_pci_driver = { diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 9d914ca69996..30726c9fe28c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -232,7 +232,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper, err_alloc_fbi: vfree(sysram); err_sysram: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return ret; } @@ -245,7 +245,7 @@ static int mga_fbdev_destroy(struct drm_device *dev, drm_fb_helper_unregister_fbi(&mfbdev->helper); if (mfb->obj) { - drm_gem_object_unreference_unlocked(mfb->obj); + drm_gem_object_put_unlocked(mfb->obj); mfb->obj = NULL; } drm_fb_helper_fini(&mfbdev->helper); diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index dce8a3eb5a10..780f983b0294 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -18,7 +18,7 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb); - drm_gem_object_unreference_unlocked(mga_fb->obj); + drm_gem_object_put_unlocked(mga_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } @@ -59,13 +59,13 @@ mgag200_user_framebuffer_create(struct drm_device *dev, mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL); if (!mga_fb) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); kfree(mga_fb); return ERR_PTR(ret); } @@ -317,7 +317,7 @@ int mgag200_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -366,6 +366,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file, bo = gem_to_mga_bo(obj); *offset = 
mgag200_bo_mmap_offset(bo); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f49f6ac5585c..b0129e7b29e3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -832,7 +832,6 @@ static struct drm_driver msm_driver = { .gem_vm_ops = &vm_ops, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 93c38eb6d187..7fbad9cb656e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -337,8 +337,6 @@ static struct drm_driver mxsfb_driver = { .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 29653fe5285c..0ea3ca823034 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -72,7 +72,7 @@ #define DRIVER_DESC "DRM module for PL111" -static struct drm_mode_config_funcs mode_config_funcs = { +static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = drm_fb_cma_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 403e135895bf..2445e75cf7ea 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -263,7 +263,6 @@ static struct drm_driver qxl_driver = { .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = qxl_mode_dumb_mmap, - .dumb_destroy = drm_gem_dumb_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index bd87768dd549..7a251a54e792 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -592,7 +592,7 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs inno_hdmi_connector_funcs = { +static const struct drm_connector_funcs inno_hdmi_connector_funcs = { .fill_modes = inno_hdmi_probe_single_connector_modes, .detect = inno_hdmi_connector_detect, .destroy = inno_hdmi_connector_destroy, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 8a0f75612d4b..70773041785b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -48,7 +48,7 @@ static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) int i; for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) - drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]); + drm_gem_object_put_unlocked(rockchip_fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(rockchip_fb); @@ -144,7 +144,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, width * drm_format_plane_cpp(mode_cmd->pixel_format, i); if (obj->size < min_size) { - 
drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); ret = -EINVAL; goto err_gem_object_unreference; } @@ -161,7 +161,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, err_gem_object_unreference: for (i--; i >= 0; i--) - drm_gem_object_unreference_unlocked(objs[i]); + drm_gem_object_put_unlocked(objs[i]); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index ce946b9c57a9..724579ebf947 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -173,7 +173,7 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev) drm_fb_helper_unregister_fbi(helper); if (helper->fb) - drm_framebuffer_unreference(helper->fb); + drm_framebuffer_put(helper->fb); drm_fb_helper_fini(helper); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index f74333efe4bb..1869c8bb76c8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -383,7 +383,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv, goto err_handle_create; /* drop reference from allocate - handle holds it now. */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return rk_obj; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 948719dddc36..bf9ed0e63973 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1026,7 +1026,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, if (old_plane_state->fb == new_plane_state->fb) continue; - drm_framebuffer_reference(old_plane_state->fb); + drm_framebuffer_get(old_plane_state->fb); drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); set_bit(VOP_PENDING_FB_UNREF, &vop->pending); WARN_ON(drm_crtc_vblank_get(crtc) != 0); @@ -1150,7 +1150,7 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) struct drm_framebuffer *fb = val; drm_crtc_vblank_put(&vop->crtc); - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); } static void vop_handle_vblank(struct vop *vop) diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 550bb262943f..42a238bbb899 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -119,7 +119,7 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs sun4i_rgb_con_funcs = { +static const struct drm_connector_funcs sun4i_rgb_con_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = sun4i_rgb_connector_destroy, .reset = drm_atomic_helper_connector_reset, diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index 7b45ac9383ea..4edf15e299ab 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -545,7 +545,7 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); } -static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = { +static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = sun4i_tv_comp_connector_destroy, .reset = drm_atomic_helper_connector_reset, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 
3ba659a5940d..224ce1dbb1cb 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1077,8 +1077,6 @@ static struct drm_driver tegra_drm_driver = { .gem_prime_import = tegra_gem_prime_import, .dumb_create = tegra_bo_dumb_create, - .dumb_map_offset = tegra_bo_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, .ioctls = tegra_drm_ioctls, .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 7a39a355678a..c6079affe642 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -423,27 +423,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, return 0; } -int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, - u32 handle, u64 *offset) -{ - struct drm_gem_object *gem; - struct tegra_bo *bo; - - gem = drm_gem_object_lookup(file, handle); - if (!gem) { - dev_err(drm->dev, "failed to lookup GEM object\n"); - return -EINVAL; - } - - bo = to_tegra_bo(gem); - - *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); - - drm_gem_object_unreference_unlocked(gem); - - return 0; -} - static int tegra_bo_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 8b32a6fd586d..8eb9fd24ef0e 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -67,8 +67,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, void tegra_bo_free_object(struct drm_gem_object *gem); int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args); -int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, - u32 handle, u64 *offset); int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig index f17c3caceab2..2e790e7dced5 100644 --- a/drivers/gpu/drm/tinydrm/Kconfig +++ b/drivers/gpu/drm/tinydrm/Kconfig @@ -32,3 +32,13 @@ config TINYDRM_REPAPER 2.71" TFT EPD Panel (E2271CS021) If M is selected the module will be called repaper. + +config TINYDRM_ST7586 + tristate "DRM support for Sitronix ST7586 display panels" + depends on DRM_TINYDRM && SPI + select TINYDRM_MIPI_DBI + help + DRM driver for the following Sitronix ST7586 panels: + * LEGO MINDSTORMS EV3 + + If M is selected the module will be called st7586. diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile index 95bb4d4fa785..0c184bd1bb59 100644 --- a/drivers/gpu/drm/tinydrm/Makefile +++ b/drivers/gpu/drm/tinydrm/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o # Displays obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o +obj-$(CONFIG_TINYDRM_ST7586) += st7586.o diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c index 75808bb84c9a..bd6cce093a85 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c @@ -185,7 +185,9 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); /** * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale * @dst: 8-bit grayscale destination buffer + * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy * * Drm doesn't have native monochrome or grayscale support. 
* Such drivers can announce the commonly supported XR24 format to userspace @@ -195,41 +197,31 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); * where 1 means foreground color and 0 background color. * * ITU BT.601 is used for the RGB -> luma (brightness) conversion. - * - * Returns: - * Zero on success, negative error code on failure. */ -int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) +void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip) { - struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; - unsigned int x, y, pitch = fb->pitches[0]; - int ret = 0; + unsigned int len = (clip->x2 - clip->x1) * sizeof(u32); + unsigned int x, y; void *buf; u32 *src; if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) - return -EINVAL; + return; /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. */ - buf = kmalloc(pitch, GFP_KERNEL); + buf = kmalloc(len, GFP_KERNEL); if (!buf) - return -ENOMEM; - - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - goto err_free; - } + return; - for (y = 0; y < fb->height; y++) { - src = cma_obj->vaddr + (y * pitch); - memcpy(buf, src, pitch); + for (y = clip->y1; y < clip->y2; y++) { + src = vaddr + (y * fb->pitches[0]); + src += clip->x1; + memcpy(buf, src, len); src = buf; - for (x = 0; x < fb->width; x++) { + for (x = clip->x1; x < clip->x2; x++) { u8 r = (*src & 0x00ff0000) >> 16; u8 g = (*src & 0x0000ff00) >> 8; u8 b = *src & 0x000000ff; @@ -240,13 +232,7 @@ int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) } } - if (import_attach) - ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); -err_free: kfree(buf); - - return ret; } EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c index f224b54a30f6..177e9d861001 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c @@ -56,7 +56,7 @@ static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = { static enum drm_connector_status tinydrm_connector_detect(struct drm_connector *connector, bool force) { - if (drm_device_is_unplugged(connector->dev)) + if (drm_dev_is_unplugged(connector->dev)) return connector_status_disconnected; return connector->status; diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c index 3343d3f15a90..30dc97b3ff21 100644 --- a/drivers/gpu/drm/tinydrm/repaper.c +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -18,6 +18,7 @@ */ #include <linux/delay.h> +#include <linux/dma-buf.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of_device.h> @@ -525,11 +526,20 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb, struct drm_clip_rect *clips, unsigned int num_clips) { + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; struct tinydrm_device *tdev = fb->dev->dev_private; struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct drm_clip_rect clip; u8 *buf = NULL; int ret = 0; + /* repaper can't do partial updates */ + clip.x1 = 0; + clip.x2 = fb->width; + clip.y1 = 0; + clip.y2 = fb->height; + mutex_lock(&tdev->dirty_lock); if (!epd->enabled) @@ -550,9 +560,21 @@ static int 
repaper_fb_dirty(struct drm_framebuffer *fb, goto out_unlock; } - ret = tinydrm_xrgb8888_to_gray8(buf, fb); - if (ret) - goto out_unlock; + if (import_attach) { + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + goto out_unlock; + } + + tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); + + if (import_attach) { + ret = dma_buf_end_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + goto out_unlock; + } repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c new file mode 100644 index 000000000000..1b39d3fb17f7 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -0,0 +1,428 @@ +/* + * DRM driver for Sitronix ST7586 panels + * + * Copyright 2017 David Lechner <david@lechnology.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/delay.h> +#include <linux/dma-buf.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/spi/spi.h> +#include <video/mipi_display.h> + +#include <drm/tinydrm/mipi-dbi.h> +#include <drm/tinydrm/tinydrm-helpers.h> + +/* controller-specific commands */ +#define ST7586_DISP_MODE_GRAY 0x38 +#define ST7586_DISP_MODE_MONO 0x39 +#define ST7586_ENABLE_DDRAM 0x3a +#define ST7586_SET_DISP_DUTY 0xb0 +#define ST7586_SET_PART_DISP 0xb4 +#define ST7586_SET_NLINE_INV 0xb5 +#define ST7586_SET_VOP 0xc0 +#define ST7586_SET_BIAS_SYSTEM 0xc3 +#define ST7586_SET_BOOST_LEVEL 0xc4 +#define ST7586_SET_VOP_OFFSET 0xc7 +#define ST7586_ENABLE_ANALOG 0xd0 +#define ST7586_AUTO_READ_CTRL 0xd7 +#define ST7586_OTP_RW_CTRL 0xe0 +#define ST7586_OTP_CTRL_OUT 0xe1 +#define ST7586_OTP_READ 0xe3 + +#define ST7586_DISP_CTRL_MX BIT(6) +#define ST7586_DISP_CTRL_MY BIT(7) + +/* + * The ST7586 controller has an unusual pixel format where 2bpp grayscale is + * packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd + * pixel using only 2 bits. 
+ * + * | D7 | D6 | D5 || | || 2bpp | + * | (D4) | (D3) | (D2) || D1 | D0 || GRAY | + * +------+------+------++------+------++------+ + * | 1 | 1 | 1 || 1 | 1 || 0 0 | black + * | 1 | 0 | 0 || 1 | 0 || 0 1 | dark gray + * | 0 | 1 | 0 || 0 | 1 || 1 0 | light gray + * | 0 | 0 | 0 || 0 | 0 || 1 1 | white + */ + +static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 }; + +static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_clip_rect *clip) +{ + size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1); + unsigned int x, y; + u8 *src, *buf, val; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return; + + tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip); + src = buf; + + for (y = clip->y1; y < clip->y2; y++) { + for (x = clip->x1; x < clip->x2; x += 3) { + val = st7586_lookup[*src++ >> 6] << 5; + val |= st7586_lookup[*src++ >> 6] << 2; + val |= st7586_lookup[*src++ >> 6] >> 1; + *dst++ = val; + } + } + + kfree(buf); +} + +static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip) +{ + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; + void *src = cma_obj->vaddr; + int ret = 0; + + if (import_attach) { + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + return ret; + } + + st7586_xrgb8888_to_gray332(dst, src, fb, clip); + + if (import_attach) + ret = dma_buf_end_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + + return ret; +} + +static int st7586_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int flags, + unsigned int color, struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct tinydrm_device *tdev = fb->dev->dev_private; + struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct drm_clip_rect clip; + int start, end; + int ret = 0; + + mutex_lock(&tdev->dirty_lock); + + if (!mipi->enabled) + goto out_unlock; + + /* fbdev can flush even when we're not interested */ + if (tdev->pipe.plane.fb != fb) + goto out_unlock; + + tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width, + fb->height); + + /* 3 pixels per byte, so grow clip to nearest multiple of 3 */ + clip.x1 = rounddown(clip.x1, 3); + clip.x2 = roundup(clip.x2, 3); + + DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id, + clip.x1, clip.x2, clip.y1, clip.y2); + + ret = st7586_buf_copy(mipi->tx_buf, fb, &clip); + if (ret) + goto out_unlock; + + /* Pixels are packed 3 per byte */ + start = clip.x1 / 3; + end = clip.x2 / 3; + + mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, + (start >> 8) & 0xFF, start & 0xFF, + (end >> 8) & 0xFF, (end - 1) & 0xFF); + mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, + (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF, + (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF); + + ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, + (u8 *)mipi->tx_buf, + (end - start) * (clip.y2 - clip.y1)); + +out_unlock: + mutex_unlock(&tdev->dirty_lock); + + if (ret) + dev_err_once(fb->dev->dev, "Failed to update display %d\n", + ret); + + return ret; +} + +static const struct drm_framebuffer_funcs st7586_fb_funcs = { + .destroy = drm_fb_cma_destroy, + .create_handle = drm_fb_cma_create_handle, + .dirty = st7586_fb_dirty, +}; + +void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct mipi_dbi *mipi = 
mipi_dbi_from_tinydrm(tdev); + struct drm_framebuffer *fb = pipe->plane.fb; + struct device *dev = tdev->drm->dev; + int ret; + u8 addr_mode; + + DRM_DEBUG_KMS("\n"); + + mipi_dbi_hw_reset(mipi); + ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f); + if (ret) { + dev_err(dev, "Error sending command %d\n", ret); + return; + } + + mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00); + + msleep(10); + + mipi_dbi_command(mipi, ST7586_OTP_READ); + + msleep(20); + + mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT); + mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE); + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF); + + msleep(50); + + mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00); + mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00); + mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02); + mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04); + mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d); + mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00); + mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY); + mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02); + + switch (mipi->rotation) { + default: + addr_mode = 0x00; + break; + case 90: + addr_mode = ST7586_DISP_CTRL_MY; + break; + case 180: + addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY; + break; + case 270: + addr_mode = ST7586_DISP_CTRL_MX; + break; + } + mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); + + mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f); + mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0); + mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77); + mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE); + + msleep(100); + + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); + + mipi->enabled = true; + + if (fb) + fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0); +} + +static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + + DRM_DEBUG_KMS("\n"); + + if (!mipi->enabled) + return; + + mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF); + mipi->enabled = false; +} + +static const u32 st7586_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static int st7586_init(struct device *dev, struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, const struct drm_display_mode *mode, + unsigned int rotation) +{ + size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay; + struct tinydrm_device *tdev = &mipi->tinydrm; + int ret; + + mutex_init(&mipi->cmdlock); + + mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + if (!mipi->tx_buf) + return -ENOMEM; + + ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver); + if (ret) + return ret; + + ret = tinydrm_display_pipe_init(tdev, pipe_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + st7586_formats, + ARRAY_SIZE(st7586_formats), + mode, rotation); + if (ret) + return ret; + + tdev->drm->mode_config.preferred_depth = 32; + mipi->rotation = rotation; + + drm_mode_config_reset(tdev->drm); + + DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", + tdev->drm->mode_config.preferred_depth, rotation); + + return 0; +} + +static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { + .enable = st7586_pipe_enable, + .disable = st7586_pipe_disable, + .update = tinydrm_display_pipe_update, + .prepare_fb = tinydrm_display_pipe_prepare_fb, +}; + +static const struct drm_display_mode st7586_mode = { + TINYDRM_MODE(178, 128, 37, 27), +}; + +DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); + +static struct 
drm_driver st7586_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, + .fops = &st7586_fops, + TINYDRM_GEM_DRIVER_OPS, + .lastclose = tinydrm_lastclose, + .debugfs_init = mipi_dbi_debugfs_init, + .name = "st7586", + .desc = "Sitronix ST7586", + .date = "20170801", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id st7586_of_match[] = { + { .compatible = "lego,ev3-lcd" }, + {}, +}; +MODULE_DEVICE_TABLE(of, st7586_of_match); + +static const struct spi_device_id st7586_id[] = { + { "ev3-lcd", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, st7586_id); + +static int st7586_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct tinydrm_device *tdev; + struct mipi_dbi *mipi; + struct gpio_desc *a0; + u32 rotation = 0; + int ret; + + mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + if (!mipi) + return -ENOMEM; + + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(mipi->reset)) { + dev_err(dev, "Failed to get gpio 'reset'\n"); + return PTR_ERR(mipi->reset); + } + + a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW); + if (IS_ERR(a0)) { + dev_err(dev, "Failed to get gpio 'a0'\n"); + return PTR_ERR(a0); + } + + device_property_read_u32(dev, "rotation", &rotation); + + ret = mipi_dbi_spi_init(spi, mipi, a0); + if (ret) + return ret; + + /* Cannot read from this controller via SPI */ + mipi->read_commands = NULL; + + /* + * we are using 8-bit data, so we are not actually swapping anything, + * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the + * right thing and not use 16-bit transfers (which results in swapped + * bytes on little-endian systems and causes out of order data to be + * sent to the display). + */ + mipi->swap_bytes = true; + + ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver, + &st7586_mode, rotation); + if (ret) + return ret; + + tdev = &mipi->tinydrm; + + ret = devm_tinydrm_register(tdev); + if (ret) + return ret; + + spi_set_drvdata(spi, mipi); + + DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n", + tdev->drm->driver->name, dev_name(dev), + spi->max_speed_hz / 1000000, + tdev->drm->primary->index); + + return 0; +} + +static void st7586_shutdown(struct spi_device *spi) +{ + struct mipi_dbi *mipi = spi_get_drvdata(spi); + + tinydrm_shutdown(&mipi->tinydrm); +} + +static struct spi_driver st7586_spi_driver = { + .driver = { + .name = "st7586", + .owner = THIS_MODULE, + .of_match_table = st7586_of_match, + }, + .id_table = st7586_id, + .probe = st7586_probe, + .shutdown = st7586_shutdown, +}; +module_spi_driver(st7586_spi_driver); + +MODULE_DESCRIPTION("Sitronix ST7586 DRM driver"); +MODULE_AUTHOR("David Lechner <david@lechnology.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index d2f57c52f7db..9f9a49748d17 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -96,7 +96,7 @@ static int udl_mode_valid(struct drm_connector *connector, static enum drm_connector_status udl_detect(struct drm_connector *connector, bool force) { - if (drm_device_is_unplugged(connector->dev)) + if (drm_dev_is_unplugged(connector->dev)) return connector_status_disconnected; return connector_status_connected; } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 0f02e1acf0ba..bfacb294d5c4 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -54,7 +54,6 @@ static struct drm_driver driver = { 
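
As an aside on the swap_bytes comment in st7586_probe() above, here is a minimal stand-alone illustration (not part of this series; byte values are arbitrary) of why packing an 8-bit command/data stream into 16-bit SPI words reorders bytes on a little-endian CPU:

/* Reinterpret an 8-bit stream as 16-bit words, the way a 16-bit
 * MSB-first SPI transfer would, and show what would leave the wire. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t stream[4] = { 0xaa, 0xbb, 0xcc, 0xdd }; /* intended wire order */
        uint16_t words[2];

        memcpy(words, stream, sizeof(words));

        /* On little-endian this prints "bbaa ddcc": each word's MSB goes out
         * first, so 0xbb would hit the wire before 0xaa. Keeping 8-bit
         * transfers (what the swap_bytes setting achieves here, per the
         * driver comment) preserves the original order. */
        printf("%04x %04x\n", words[0], words[1]);
        return 0;
}
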
.dumb_create = udl_dumb_create, .dumb_map_offset = udl_gem_mmap, - .dumb_destroy = drm_gem_dumb_destroy, .fops = &udl_driver_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -102,7 +101,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) drm_kms_helper_poll_disable(dev); udl_fbdev_unplug(dev); udl_drop_usb(dev); - drm_unplug_dev(dev); + drm_dev_unplug(dev); } /* diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index a5c54dc60def..b7ca90db4e80 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -198,7 +198,7 @@ static int udl_fb_open(struct fb_info *info, int user) struct udl_device *udl = dev->dev_private; /* If the USB device is gone, we don't accept new opens */ - if (drm_device_is_unplugged(udl->ddev)) + if (drm_dev_is_unplugged(udl->ddev)) return -ENODEV; ufbdev->fb_count++; @@ -309,7 +309,7 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) struct udl_framebuffer *ufb = to_udl_fb(fb); if (ufb->obj) - drm_gem_object_unreference_unlocked(&ufb->obj->base); + drm_gem_object_put_unlocked(&ufb->obj->base); drm_framebuffer_cleanup(fb); kfree(ufb); @@ -403,7 +403,7 @@ static int udlfb_create(struct drm_fb_helper *helper, return ret; out_gfree: - drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); out: return ret; } @@ -419,7 +419,7 @@ static void udl_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ufbdev->helper); drm_framebuffer_unregister_private(&ufbdev->ufb.base); drm_framebuffer_cleanup(&ufbdev->ufb.base); - drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); } int udl_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index db9ceceba30e..dee6bd9a3dd1 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file, return ret; } - drm_gem_object_unreference_unlocked(&obj->base); + drm_gem_object_put_unlocked(&obj->base); *handle_p = handle; return 0; } @@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_unreference(&gobj->base); + drm_gem_object_put(&gobj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index b24dd8685590..3afdbf4bc10b 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -366,7 +366,7 @@ int vc4_dumb_create(struct drm_file *file_priv, return PTR_ERR(bo); ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader) { - DRM_ERROR("Attempting to export shader BO\n"); + DRM_DEBUG("Attempting to export shader BO\n"); return ERR_PTR(-EINVAL); } @@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) bo = to_vc4_bo(gem_obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { - DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); return -EINVAL; } @@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object 
*obj, struct vm_area_struct *vma) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { - DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); return -EINVAL; } @@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj) struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader) { - DRM_ERROR("mmaping of shader BOs not allowed.\n"); + DRM_DEBUG("mmaping of shader BOs not allowed.\n"); return ERR_PTR(-EINVAL); } @@ -581,7 +581,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, return PTR_ERR(bo); ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -594,14 +594,14 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } /* The mmap offset was set up at BO allocation time. */ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -657,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); fail: - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -698,13 +698,13 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); bo->t_format = t_format; - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); @@ -739,7 +739,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, else args->modifier = DRM_FORMAT_MOD_NONE; - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return 0; } @@ -830,7 +830,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, ret = -ENOMEM; mutex_unlock(&vc4->bo_lock); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 664a55b45af0..ce1e3b9e14c9 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -763,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) } drm_crtc_vblank_put(crtc); - drm_framebuffer_unreference(flip_state->fb); + drm_framebuffer_put(flip_state->fb); kfree(flip_state); up(&vc4->async_modeset); @@ -792,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, if (!flip_state) return -ENOMEM; - drm_framebuffer_reference(fb); + drm_framebuffer_get(fb); flip_state->fb = fb; flip_state->crtc = crtc; flip_state->event = event; @@ -800,7 +800,7 @@ static int vc4_async_page_flip(struct 
drm_crtc *crtc, /* Make sure all other async modesetes have landed. */ ret = down_interruptible(&vc4->async_modeset); if (ret) { - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); kfree(flip_state); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index e8f0e1790d5e..1c96edcb302b 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -99,6 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, case DRM_VC4_PARAM_SUPPORTS_BRANCHES: case DRM_VC4_PARAM_SUPPORTS_ETC1: case DRM_VC4_PARAM_SUPPORTS_THREADED_FS: + case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER: args->value = true; break; default: diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 629d372633e6..d1e0dc908048 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1636,14 +1636,10 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, pm_runtime_disable(dev); - drm_bridge_remove(dsi->bridge); vc4_dsi_encoder_destroy(dsi->encoder); mipi_dsi_host_unregister(&dsi->dsi_host); - clk_disable_unprepare(dsi->pll_phy_clock); - clk_disable_unprepare(dsi->escape_clock); - if (dsi->port == 1) vc4->dsi1 = NULL; } diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 209fccd0d3b4..d0c6bfb68c4e 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) unsigned int i; for (i = 0; i < state->user_state.bo_count; i++) - drm_gem_object_unreference_unlocked(state->bo[i]); + drm_gem_object_put_unlocked(state->bo[i]); kfree(state); } @@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev) continue; for (j = 0; j < exec[i]->bo_count; j++) { - drm_gem_object_reference(&exec[i]->bo[j]->base); + drm_gem_object_get(&exec[i]->bo[j]->base); kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { - drm_gem_object_reference(&bo->base.base); + drm_gem_object_get(&bo->base.base); kernel_state->bo[j + prev_idx] = &bo->base.base; j++; } @@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev, /* See comment on bo_index for why we have to check * this. 
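
On the new DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER case added to vc4_get_param_ioctl() above: a hedged sketch of how userspace could probe for it. drmIoctl() and DRM_IOCTL_VC4_GET_PARAM come from the existing libdrm/vc4 UAPI; the include path and helper name are illustrative, and fd is assumed to be an open vc4 DRM node.

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/vc4_drm.h>        /* illustrative path for the vc4 UAPI header */

static bool vc4_supports_fixed_rcl_order(int fd)
{
        struct drm_vc4_get_param gp = {
                .param = DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER,
        };

        /* Kernels without this series reject the unknown param, so a
         * failed ioctl simply means "not supported". */
        return drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &gp) == 0 && gp.value;
}
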
*/ - DRM_ERROR("Rendering requires BOs to validate\n"); + DRM_DEBUG("Rendering requires BOs to validate\n"); return -EINVAL; } @@ -690,13 +690,13 @@ vc4_cl_lookup_bos(struct drm_device *dev, struct drm_gem_object *bo = idr_find(&file_priv->object_idr, handles[i]); if (!bo) { - DRM_ERROR("Failed to look up GEM BO %d: %d\n", + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", i, handles[i]); ret = -EINVAL; spin_unlock(&file_priv->table_lock); goto fail; } - drm_gem_object_reference(bo); + drm_gem_object_get(bo); exec->bo[i] = (struct drm_gem_cma_object *)bo; } spin_unlock(&file_priv->table_lock); @@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) args->shader_rec_count >= (UINT_MAX / sizeof(struct vc4_shader_state)) || temp_size < exec_size) { - DRM_ERROR("overflow in exec arguments\n"); + DRM_DEBUG("overflow in exec arguments\n"); ret = -EINVAL; goto fail; } @@ -834,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) if (exec->bo) { for (i = 0; i < exec->bo_count; i++) - drm_gem_object_unreference_unlocked(&exec->bo[i]->base); + drm_gem_object_put_unlocked(&exec->bo[i]->base); kvfree(exec->bo); } @@ -842,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); - drm_gem_object_unreference_unlocked(&bo->base.base); + drm_gem_object_put_unlocked(&bo->base.base); } /* Free up the allocation of any bin slots we used. */ @@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { - DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } bo = to_vc4_bo(gem_obj); @@ -981,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns); - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); return ret; } @@ -1007,8 +1007,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct ww_acquire_ctx acquire_ctx; int ret = 0; - if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { - DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); + if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | + VC4_SUBMIT_CL_FIXED_RCL_ORDER | + VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X | + VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) { + DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags); return -EINVAL; } @@ -1117,6 +1120,4 @@ vc4_gem_destroy(struct drm_device *dev) if (vc4->hang_state) vc4_free_hang_state(dev, vc4->hang_state); - - vc4_bo_cache_destroy(dev); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ff09b8e2f9ee..937da8dd65b8 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -288,6 +288,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + kfree(edid); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index aeec6e8703d2..dfe7554268f0 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -169,7 +169,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, gem_obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (!gem_obj) { - 
DRM_ERROR("Failed to look up GEM BO %d\n", + DRM_DEBUG("Failed to look up GEM BO %d\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } @@ -184,7 +184,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; } - drm_gem_object_unreference_unlocked(gem_obj); + drm_gem_object_put_unlocked(gem_obj); mode_cmd = &mode_cmd_local; } diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 4a8051532f00..273984f71ae2 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -261,8 +261,17 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, uint8_t max_y_tile = args->max_y_tile; uint8_t xtiles = max_x_tile - min_x_tile + 1; uint8_t ytiles = max_y_tile - min_y_tile + 1; - uint8_t x, y; + uint8_t xi, yi; uint32_t size, loop_body_size; + bool positive_x = true; + bool positive_y = true; + + if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) { + if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X)) + positive_x = false; + if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) + positive_y = false; + } size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE; loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE; @@ -354,10 +363,12 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, rcl_u16(setup, args->height); rcl_u16(setup, args->color_write.bits); - for (y = min_y_tile; y <= max_y_tile; y++) { - for (x = min_x_tile; x <= max_x_tile; x++) { - bool first = (x == min_x_tile && y == min_y_tile); - bool last = (x == max_x_tile && y == max_y_tile); + for (yi = 0; yi < ytiles; yi++) { + int y = positive_y ? min_y_tile + yi : max_y_tile - yi; + for (xi = 0; xi < xtiles; xi++) { + int x = positive_x ? 
min_x_tile + xi : max_x_tile - xi; + bool first = (xi == 0 && yi == 0); + bool last = (xi == xtiles - 1 && yi == ytiles - 1); emit_tile(exec, setup, x, y, first, last); } @@ -378,14 +389,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec, u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32); if (surf->offset > obj->base.size) { - DRM_ERROR("surface offset %d > BO size %zd\n", + DRM_DEBUG("surface offset %d > BO size %zd\n", surf->offset, obj->base.size); return -EINVAL; } if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE < render_tiles_stride * args->max_y_tile + args->max_x_tile) { - DRM_ERROR("MSAA tile %d, %d out of bounds " + DRM_DEBUG("MSAA tile %d, %d out of bounds " "(bo size %zd, offset %d).\n", args->max_x_tile, args->max_y_tile, obj->base.size, @@ -401,7 +412,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, struct drm_vc4_submit_rcl_surface *surf) { if (surf->flags != 0 || surf->bits != 0) { - DRM_ERROR("MSAA surface had nonzero flags/bits\n"); + DRM_DEBUG("MSAA surface had nonzero flags/bits\n"); return -EINVAL; } @@ -415,7 +426,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec, exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; if (surf->offset & 0xf) { - DRM_ERROR("MSAA write must be 16b aligned.\n"); + DRM_DEBUG("MSAA write must be 16b aligned.\n"); return -EINVAL; } @@ -437,7 +448,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, int ret; if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { - DRM_ERROR("Extra flags set\n"); + DRM_DEBUG("Extra flags set\n"); return -EINVAL; } @@ -453,12 +464,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { if (surf == &exec->args->zs_write) { - DRM_ERROR("general zs write may not be a full-res.\n"); + DRM_DEBUG("general zs write may not be a full-res.\n"); return -EINVAL; } if (surf->bits != 0) { - DRM_ERROR("load/store general bits set with " + DRM_DEBUG("load/store general bits set with " "full res load/store.\n"); return -EINVAL; } @@ -473,19 +484,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK | VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK | VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) { - DRM_ERROR("Unknown bits in load/store: 0x%04x\n", + DRM_DEBUG("Unknown bits in load/store: 0x%04x\n", surf->bits); return -EINVAL; } if (tiling > VC4_TILING_FORMAT_LT) { - DRM_ERROR("Bad tiling format\n"); + DRM_DEBUG("Bad tiling format\n"); return -EINVAL; } if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) { if (format != 0) { - DRM_ERROR("No color format should be set for ZS\n"); + DRM_DEBUG("No color format should be set for ZS\n"); return -EINVAL; } cpp = 4; @@ -499,16 +510,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, cpp = 4; break; default: - DRM_ERROR("Bad tile buffer format\n"); + DRM_DEBUG("Bad tile buffer format\n"); return -EINVAL; } } else { - DRM_ERROR("Bad load/store buffer %d.\n", buffer); + DRM_DEBUG("Bad load/store buffer %d.\n", buffer); return -EINVAL; } if (surf->offset & 0xf) { - DRM_ERROR("load/store buffer must be 16b aligned.\n"); + DRM_DEBUG("load/store buffer must be 16b aligned.\n"); return -EINVAL; } @@ -533,7 +544,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, int cpp; if (surf->flags != 0) { - DRM_ERROR("No flags supported on render config.\n"); + DRM_DEBUG("No flags supported on render config.\n"); return -EINVAL; } @@ -541,7 +552,7 @@ 
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, VC4_RENDER_CONFIG_FORMAT_MASK | VC4_RENDER_CONFIG_MS_MODE_4X | VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { - DRM_ERROR("Unknown bits in render config: 0x%04x\n", + DRM_DEBUG("Unknown bits in render config: 0x%04x\n", surf->bits); return -EINVAL; } @@ -556,7 +567,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; if (tiling > VC4_TILING_FORMAT_LT) { - DRM_ERROR("Bad tiling format\n"); + DRM_DEBUG("Bad tiling format\n"); return -EINVAL; } @@ -569,7 +580,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, cpp = 4; break; default: - DRM_ERROR("Bad tile buffer format\n"); + DRM_DEBUG("Bad tile buffer format\n"); return -EINVAL; } @@ -590,7 +601,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) if (args->min_x_tile > args->max_x_tile || args->min_y_tile > args->max_y_tile) { - DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", + DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n", args->min_x_tile, args->min_y_tile, args->max_x_tile, args->max_y_tile); return -EINVAL; @@ -599,7 +610,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) if (has_bin && (args->max_x_tile > exec->bin_tiles_x || args->max_y_tile > exec->bin_tiles_y)) { - DRM_ERROR("Render tiles (%d,%d) outside of bin config " + DRM_DEBUG("Render tiles (%d,%d) outside of bin config " "(%d,%d)\n", args->max_x_tile, args->max_y_tile, exec->bin_tiles_x, exec->bin_tiles_y); @@ -642,7 +653,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) */ if (!setup.color_write && !setup.zs_write && !setup.msaa_color_write && !setup.msaa_zs_write) { - DRM_ERROR("RCL requires color or Z/S write\n"); + DRM_DEBUG("RCL requires color or Z/S write\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 814b512c6b9a..2db485abb186 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) struct vc4_bo *bo; if (hindex >= exec->bo_count) { - DRM_ERROR("BO index %d greater than BO count %d\n", + DRM_DEBUG("BO index %d greater than BO count %d\n", hindex, exec->bo_count); return NULL; } @@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) bo = to_vc4_bo(&obj->base); if (bo->validated_shader) { - DRM_ERROR("Trying to use shader BO as something other than " + DRM_DEBUG("Trying to use shader BO as something other than " "a shader\n"); return NULL; } @@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, * our math. 
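
Tying the vc4_submit_cl_ioctl() flag check and the vc4_create_rcl_bo() tile loop above together: a sketch (not a complete submission) of a client forcing render tiles to be emitted in increasing X/Y order. Every other drm_vc4_submit_cl field is elided, and fd is assumed to be an open vc4 DRM node.

struct drm_vc4_submit_cl submit = {
        .flags = VC4_SUBMIT_CL_FIXED_RCL_ORDER |
                 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
                 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y,
        /* ... bin/render CLs, BO handle list, shader recs, clear color ... */
};

/* Only valid on kernels that report DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER;
 * older kernels reject the unknown flags above with -EINVAL. */
drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
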
*/ if (width > 4096 || height > 4096) { - DRM_ERROR("Surface dimensions (%d,%d) too large", + DRM_DEBUG("Surface dimensions (%d,%d) too large", width, height); return false; } @@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, aligned_height = round_up(height, utile_h); break; default: - DRM_ERROR("buffer tiling %d unsupported\n", tiling_format); + DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format); return false; } @@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, if (size + offset < size || size + offset > fbo->base.size) { - DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", + DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", width, height, aligned_width, aligned_height, size, offset, fbo->base.size); @@ -214,7 +214,7 @@ static int validate_flush(VALIDATE_ARGS) { if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) { - DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n"); + DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n"); return -EINVAL; } exec->found_flush = true; @@ -226,13 +226,13 @@ static int validate_start_tile_binning(VALIDATE_ARGS) { if (exec->found_start_tile_binning_packet) { - DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n"); + DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n"); return -EINVAL; } exec->found_start_tile_binning_packet = true; if (!exec->found_tile_binning_mode_config_packet) { - DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); + DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); return -EINVAL; } @@ -243,7 +243,7 @@ static int validate_increment_semaphore(VALIDATE_ARGS) { if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) { - DRM_ERROR("Bin CL must end with " + DRM_DEBUG("Bin CL must end with " "VC4_PACKET_INCREMENT_SEMAPHORE\n"); return -EINVAL; } @@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS) /* Check overflow condition */ if (exec->shader_state_count == 0) { - DRM_ERROR("shader state must precede primitives\n"); + DRM_DEBUG("shader state must precede primitives\n"); return -EINVAL; } shader_state = &exec->shader_state[exec->shader_state_count - 1]; @@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS) if (offset > ib->base.size || (ib->base.size - offset) / index_size < length) { - DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", + DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n", offset, length, index_size, ib->base.size); return -EINVAL; } @@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS) /* Check overflow condition */ if (exec->shader_state_count == 0) { - DRM_ERROR("shader state must precede primitives\n"); + DRM_DEBUG("shader state must precede primitives\n"); return -EINVAL; } shader_state = &exec->shader_state[exec->shader_state_count - 1]; if (length + base_index < length) { - DRM_ERROR("primitive vertex count overflow\n"); + DRM_DEBUG("primitive vertex count overflow\n"); return -EINVAL; } max_index = length + base_index - 1; @@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS) uint32_t i = exec->shader_state_count++; if (i >= exec->shader_state_size) { - DRM_ERROR("More requests for shader states than declared\n"); + DRM_DEBUG("More requests for shader states than declared\n"); return -EINVAL; } @@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS) exec->shader_state[i].max_index = 0; if (exec->shader_state[i].addr & ~0xf) { - DRM_ERROR("high bits set in GL shader rec reference\n"); + 
DRM_DEBUG("high bits set in GL shader rec reference\n"); return -EINVAL; } @@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS) int bin_slot; if (exec->found_tile_binning_mode_config_packet) { - DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); + DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); return -EINVAL; } exec->found_tile_binning_mode_config_packet = true; @@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS) if (exec->bin_tiles_x == 0 || exec->bin_tiles_y == 0) { - DRM_ERROR("Tile binning config of %dx%d too small\n", + DRM_DEBUG("Tile binning config of %dx%d too small\n", exec->bin_tiles_x, exec->bin_tiles_y); return -EINVAL; } if (flags & (VC4_BIN_CONFIG_DB_NON_MS | VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) { - DRM_ERROR("unsupported binning config flags 0x%02x\n", flags); + DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags); return -EINVAL; } @@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev, const struct cmd_info *info; if (cmd >= ARRAY_SIZE(cmd_info)) { - DRM_ERROR("0x%08x: packet %d out of bounds\n", + DRM_DEBUG("0x%08x: packet %d out of bounds\n", src_offset, cmd); return -EINVAL; } info = &cmd_info[cmd]; if (!info->name) { - DRM_ERROR("0x%08x: packet %d invalid\n", + DRM_DEBUG("0x%08x: packet %d invalid\n", src_offset, cmd); return -EINVAL; } if (src_offset + info->len > len) { - DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " + DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x " "exceeds bounds (0x%08x)\n", src_offset, cmd, info->name, info->len, src_offset + len); @@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev, if (info->func && info->func(exec, dst_pkt + 1, src_pkt + 1)) { - DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", + DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n", src_offset, cmd, info->name); return -EINVAL; } @@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev, exec->ct0ea = exec->ct0ca + dst_offset; if (!exec->found_start_tile_binning_packet) { - DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); + DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); return -EINVAL; } @@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev, * semaphore increment. 
*/ if (!exec->found_increment_semaphore_packet || !exec->found_flush) { - DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " + DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " "VC4_PACKET_FLUSH\n"); return -EINVAL; } @@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec, uint32_t remaining_size = tex->base.size - p0; if (p0 > tex->base.size - 4) { - DRM_ERROR("UBO offset greater than UBO size\n"); + DRM_DEBUG("UBO offset greater than UBO size\n"); goto fail; } if (p1 > remaining_size - 4) { - DRM_ERROR("UBO clamp would allow reads " + DRM_DEBUG("UBO clamp would allow reads " "outside of UBO\n"); goto fail; } @@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec, if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) == VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) { if (cube_map_stride) { - DRM_ERROR("Cube map stride set twice\n"); + DRM_DEBUG("Cube map stride set twice\n"); goto fail; } cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK; } if (!cube_map_stride) { - DRM_ERROR("Cube map stride not set\n"); + DRM_DEBUG("Cube map stride not set\n"); goto fail; } } @@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec, case VC4_TEXTURE_TYPE_RGBA64: case VC4_TEXTURE_TYPE_YUV422R: default: - DRM_ERROR("Texture format %d unsupported\n", type); + DRM_DEBUG("Texture format %d unsupported\n", type); goto fail; } utile_w = utile_width(cpp); @@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec, level_size = aligned_width * cpp * aligned_height; if (offset < level_size) { - DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db " + DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db " "overflowed buffer bounds (offset %d)\n", i, level_width, level_height, aligned_width, aligned_height, @@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev, nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes; if (nr_relocs * 4 > exec->shader_rec_size) { - DRM_ERROR("overflowed shader recs reading %d handles " + DRM_DEBUG("overflowed shader recs reading %d handles " "from %d bytes left\n", nr_relocs, exec->shader_rec_size); return -EINVAL; @@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev, exec->shader_rec_size -= nr_relocs * 4; if (packet_size > exec->shader_rec_size) { - DRM_ERROR("overflowed shader recs copying %db packet " + DRM_DEBUG("overflowed shader recs copying %db packet " "from %d bytes left\n", packet_size, exec->shader_rec_size); return -EINVAL; @@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev, for (i = 0; i < shader_reloc_count; i++) { if (src_handles[i] > exec->bo_count) { - DRM_ERROR("Shader handle %d too big\n", src_handles[i]); + DRM_DEBUG("Shader handle %d too big\n", src_handles[i]); return -EINVAL; } @@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev, if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) != to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) { - DRM_ERROR("Thread mode of CL and FS do not match\n"); + DRM_DEBUG("Thread mode of CL and FS do not match\n"); return -EINVAL; } if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded || to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) { - DRM_ERROR("cs and vs cannot be threaded\n"); + DRM_DEBUG("cs and vs cannot be threaded\n"); return -EINVAL; } @@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev, *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; if (src_offset != 0) { - DRM_ERROR("Shaders must be at offset 0 of " + DRM_DEBUG("Shaders must be at offset 0 of " "the BO.\n"); return -EINVAL; } @@ -842,7 +842,7 @@ 
validate_gl_shader_rec(struct drm_device *dev, if (validated_shader->uniforms_src_size > exec->uniforms_size) { - DRM_ERROR("Uniforms src buffer overflow\n"); + DRM_DEBUG("Uniforms src buffer overflow\n"); return -EINVAL; } @@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev, if (vbo->base.size < offset || vbo->base.size - offset < attr_size) { - DRM_ERROR("BO offset overflow (%d + %d > %zu)\n", + DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n", offset, attr_size, vbo->base.size); return -EINVAL; } @@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev, max_index = ((vbo->base.size - offset - attr_size) / stride); if (state->max_index > max_index) { - DRM_ERROR("primitives use index %d out of " + DRM_DEBUG("primitives use index %d out of " "supplied %d\n", state->max_index, max_index); return -EINVAL; diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 0b2df5c6efb4..d3f15bf60900 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, uint32_t clamp_reg, clamp_offset; if (sig == QPU_SIG_SMALL_IMM) { - DRM_ERROR("direct TMU read used small immediate\n"); + DRM_DEBUG("direct TMU read used small immediate\n"); return false; } @@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, */ if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { - DRM_ERROR("direct TMU load wasn't an add\n"); + DRM_DEBUG("direct TMU load wasn't an add\n"); return false; } @@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, */ clamp_reg = raddr_add_a_to_live_reg_index(inst); if (clamp_reg == ~0) { - DRM_ERROR("direct TMU load wasn't clamped\n"); + DRM_DEBUG("direct TMU load wasn't clamped\n"); return false; } clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg]; if (clamp_offset == ~0) { - DRM_ERROR("direct TMU load wasn't clamped\n"); + DRM_DEBUG("direct TMU load wasn't clamped\n"); return false; } @@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { - DRM_ERROR("direct TMU load didn't add to a uniform\n"); + DRM_DEBUG("direct TMU load didn't add to a uniform\n"); return false; } @@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, } else { if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM && raddr_b == QPU_R_UNIF)) { - DRM_ERROR("uniform read in the same instruction as " + DRM_DEBUG("uniform read in the same instruction as " "texture setup.\n"); return false; } } if (validation_state->tmu_write_count[tmu] >= 4) { - DRM_ERROR("TMU%d got too many parameters before dispatch\n", + DRM_DEBUG("TMU%d got too many parameters before dispatch\n", tmu); return false; } @@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader, */ if (!is_direct) { if (validation_state->needs_uniform_address_update) { - DRM_ERROR("Texturing with undefined uniform address\n"); + DRM_DEBUG("Texturing with undefined uniform address\n"); return false; } @@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade case QPU_SIG_LOAD_TMU1: break; default: - DRM_ERROR("uniforms address change must be " + DRM_DEBUG("uniforms address change must be " "normal math\n"); return false; } if (is_mul || 
QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { - DRM_ERROR("Uniform address reset must be an ADD.\n"); + DRM_DEBUG("Uniform address reset must be an ADD.\n"); return false; } if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) { - DRM_ERROR("Uniform address reset must be unconditional.\n"); + DRM_DEBUG("Uniform address reset must be unconditional.\n"); return false; } if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP && !(inst & QPU_PM)) { - DRM_ERROR("No packing allowed on uniforms reset\n"); + DRM_DEBUG("No packing allowed on uniforms reset\n"); return false; } if (add_lri == -1) { - DRM_ERROR("First argument of uniform address write must be " + DRM_DEBUG("First argument of uniform address write must be " "an immediate value.\n"); return false; } if (validation_state->live_immediates[add_lri] != expected_offset) { - DRM_ERROR("Resetting uniforms with offset %db instead of %db\n", + DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n", validation_state->live_immediates[add_lri], expected_offset); return false; @@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { - DRM_ERROR("Second argument of uniform address write must be " + DRM_DEBUG("Second argument of uniform address write must be " "a uniform.\n"); return false; } @@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader, switch (waddr) { case QPU_W_UNIFORMS_ADDRESS: if (is_b) { - DRM_ERROR("relative uniforms address change " + DRM_DEBUG("relative uniforms address change " "unsupported\n"); return false; } @@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader, /* XXX: I haven't thought about these, so don't support them * for now. */ - DRM_ERROR("Unsupported waddr %d\n", waddr); + DRM_DEBUG("Unsupported waddr %d\n", waddr); return false; case QPU_W_VPM_ADDR: - DRM_ERROR("General VPM DMA unsupported\n"); + DRM_DEBUG("General VPM DMA unsupported\n"); return false; case QPU_W_VPM: @@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader, bool ok; if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { - DRM_ERROR("ADD and MUL both set up textures\n"); + DRM_DEBUG("ADD and MUL both set up textures\n"); return false; } @@ -588,7 +588,7 @@ check_branch(uint64_t inst, * there's no need for it. 
*/ if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) { - DRM_ERROR("branch instruction at %d wrote a register.\n", + DRM_DEBUG("branch instruction at %d wrote a register.\n", validation_state->ip); return false; } @@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader, validated_shader->uniforms_size += 4; if (validation_state->needs_uniform_address_update) { - DRM_ERROR("Uniform read with undefined uniform " + DRM_DEBUG("Uniform read with undefined uniform " "address\n"); return false; } @@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) continue; if (ip - last_branch < 4) { - DRM_ERROR("Branch at %d during delay slots\n", ip); + DRM_DEBUG("Branch at %d during delay slots\n", ip); return false; } last_branch = ip; if (inst & QPU_BRANCH_REG) { - DRM_ERROR("branching from register relative " + DRM_DEBUG("branching from register relative " "not supported\n"); return false; } if (!(inst & QPU_BRANCH_REL)) { - DRM_ERROR("relative branching required\n"); + DRM_DEBUG("relative branching required\n"); return false; } @@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) * end of the shader object. */ if (branch_imm % sizeof(inst) != 0) { - DRM_ERROR("branch target not aligned\n"); + DRM_DEBUG("branch target not aligned\n"); return false; } branch_target_ip = after_delay_ip + (branch_imm >> 3); if (branch_target_ip >= validation_state->max_ip) { - DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n", + DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n", ip, branch_target_ip, validation_state->max_ip); return false; @@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) * the shader. 
*/ if (after_delay_ip >= validation_state->max_ip) { - DRM_ERROR("Branch at %d continues past shader end " + DRM_DEBUG("Branch at %d continues past shader end " "(%d/%d)\n", ip, after_delay_ip, validation_state->max_ip); return false; @@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) } if (max_branch_target > validation_state->max_ip - 3) { - DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); + DRM_DEBUG("Branch landed after QPU_SIG_PROG_END"); return false; } @@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state) return true; if (texturing_in_progress(validation_state)) { - DRM_ERROR("Branch target landed during TMU setup\n"); + DRM_DEBUG("Branch target landed during TMU setup\n"); return false; } @@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) case QPU_SIG_LAST_THREAD_SWITCH: if (!check_instruction_writes(validated_shader, &validation_state)) { - DRM_ERROR("Bad write at ip %d\n", ip); + DRM_DEBUG("Bad write at ip %d\n", ip); goto fail; } @@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) validated_shader->is_threaded = true; if (ip < last_thread_switch_ip + 3) { - DRM_ERROR("Thread switch too soon after " + DRM_DEBUG("Thread switch too soon after " "last switch at ip %d\n", ip); goto fail; } @@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) case QPU_SIG_LOAD_IMM: if (!check_instruction_writes(validated_shader, &validation_state)) { - DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); + DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip); goto fail; } break; @@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) goto fail; if (ip < last_thread_switch_ip + 3) { - DRM_ERROR("Branch in thread switch at ip %d", + DRM_DEBUG("Branch in thread switch at ip %d", ip); goto fail; } break; default: - DRM_ERROR("Unsupported QPU signal %d at " + DRM_DEBUG("Unsupported QPU signal %d at " "instruction %d\n", sig, ip); goto fail; } @@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) } if (ip == validation_state.max_ip) { - DRM_ERROR("shader failed to terminate before " + DRM_DEBUG("shader failed to terminate before " "shader BO end at %zd\n", shader_obj->base.size); goto fail; @@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) /* Might corrupt other thread */ if (validated_shader->is_threaded && validation_state.all_registers_used) { - DRM_ERROR("Shader uses threading, but uses the upper " + DRM_DEBUG("Shader uses threading, but uses the upper " "half of the registers, too\n"); goto fail; } diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 12289673f457..2524ff116f00 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -190,7 +190,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, return ERR_CAST(obj); ret = drm_gem_handle_create(file, &obj->base, handle); - drm_gem_object_unreference_unlocked(&obj->base); + drm_gem_object_put_unlocked(&obj->base); if (ret) goto err; @@ -245,7 +245,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&obj->vma_node); unref: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 3109c8308eb5..8fd52f211e9d 100644 --- 
a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -213,7 +213,7 @@ err_fence: dma_fence_put(fence); } err: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 63d35c7e416c..49a3d8d5a249 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -122,7 +122,6 @@ static struct drm_driver driver = { .dumb_create = virtio_gpu_mode_dumb_create, .dumb_map_offset = virtio_gpu_mode_dumb_mmap, - .dumb_destroy = virtio_gpu_mode_dumb_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3a66abb8fd50..da2fb585fea4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -236,9 +236,6 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev, int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index 046e28b69d99..15d18fd0c64b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -308,7 +308,7 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev, return 0; } -static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = { +static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = { .fb_probe = virtio_gpufb_create, }; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index cc025d8fbe19..72ad7b103448 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -118,13 +118,6 @@ fail: return ret; } -int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} - int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p) diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c index bf6635826159..c1572843003a 100644 --- a/drivers/staging/vboxvideo/vbox_fb.c +++ b/drivers/staging/vboxvideo/vbox_fb.c @@ -343,7 +343,7 @@ void vbox_fbdev_fini(struct drm_device *dev) vbox_bo_unpin(bo); vbox_bo_unreserve(bo); } - drm_gem_object_unreference_unlocked(afb->obj); + drm_gem_object_put_unlocked(afb->obj); afb->obj = NULL; } drm_fb_helper_fini(&fbdev->helper); diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c index d0c6ec75a3c7..80bd039fa08e 100644 --- a/drivers/staging/vboxvideo/vbox_main.c +++ b/drivers/staging/vboxvideo/vbox_main.c @@ -40,7 +40,7 @@ static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); if (vbox_fb->obj) - drm_gem_object_unreference_unlocked(vbox_fb->obj); + drm_gem_object_put_unlocked(vbox_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); @@ -198,7 +198,7 @@ static struct drm_framebuffer *vbox_user_framebuffer_create( err_free_vbox_fb: kfree(vbox_fb); err_unref_obj: - drm_gem_object_unreference_unlocked(obj); + 
drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } @@ -472,7 +472,7 @@ int vbox_dumb_create(struct drm_file *file, return ret; ret = drm_gem_handle_create(file, gobj, &handle); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (ret) return ret; @@ -525,7 +525,7 @@ vbox_dumb_mmap_offset(struct drm_file *file, bo = gem_to_vbox_bo(obj); *offset = vbox_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); ret = 0; out_unlock: diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index 996da1c79158..e5b6383984e7 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -812,7 +812,7 @@ out_unmap_bo: out_unreserve_bo: vbox_bo_unreserve(bo); out_unref_obj: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 3aa3809ab524..7277783a4ff0 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -82,19 +82,10 @@ #include <drm/drm_sysfs.h> #include <drm/drm_vblank.h> #include <drm/drm_irq.h> - +#include <drm/drm_device.h> struct module; -struct drm_device; -struct drm_agp_head; -struct drm_local_map; -struct drm_device_dma; -struct drm_gem_object; -struct drm_master; -struct drm_vblank_crtc; -struct drm_vma_offset_manager; - struct device_node; struct videomode; struct reservation_object; @@ -306,170 +297,6 @@ struct pci_controller; /** - * DRM device structure. This structure represent a complete card that - * may contain multiple heads. - */ -struct drm_device { - struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ - int if_version; /**< Highest interface version set */ - - /** \name Lifetime Management */ - /*@{ */ - struct kref ref; /**< Object ref-count */ - struct device *dev; /**< Device structure of bus-device */ - struct drm_driver *driver; /**< DRM driver managing the device */ - void *dev_private; /**< DRM driver private data */ - struct drm_minor *control; /**< Control node */ - struct drm_minor *primary; /**< Primary node */ - struct drm_minor *render; /**< Render node */ - bool registered; - - /* currently active master for this device. Protected by master_mutex */ - struct drm_master *master; - - atomic_t unplugged; /**< Flag whether dev is dead */ - struct inode *anon_inode; /**< inode for private address-space */ - char *unique; /**< unique name of the device */ - /*@} */ - - /** \name Locks */ - /*@{ */ - struct mutex struct_mutex; /**< For others */ - struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ - /*@} */ - - /** \name Usage Counters */ - /*@{ */ - int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ - spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. 
*/ - int buf_use; /**< Buffers in use -- cannot alloc */ - atomic_t buf_alloc; /**< Buffer allocation in progress */ - /*@} */ - - struct mutex filelist_mutex; - struct list_head filelist; - - /** \name Memory management */ - /*@{ */ - struct list_head maplist; /**< Linked list of regions */ - struct drm_open_hash map_hash; /**< User token hash table for maps */ - - /** \name Context handle management */ - /*@{ */ - struct list_head ctxlist; /**< Linked list of context handles */ - struct mutex ctxlist_mutex; /**< For ctxlist */ - - struct idr ctx_idr; - - struct list_head vmalist; /**< List of vmas (for debugging) */ - - /*@} */ - - /** \name DMA support */ - /*@{ */ - struct drm_device_dma *dma; /**< Optional pointer for DMA support */ - /*@} */ - - /** \name Context support */ - /*@{ */ - - __volatile__ long context_flag; /**< Context swapping flag */ - int last_context; /**< Last current context */ - /*@} */ - - /** - * @irq_enabled: - * - * Indicates that interrupt handling is enabled, specifically vblank - * handling. Drivers which don't use drm_irq_install() need to set this - * to true manually. - */ - bool irq_enabled; - int irq; - - /** - * @vblank_disable_immediate: - * - * If true, vblank interrupt will be disabled immediately when the - * refcount drops to zero, as opposed to via the vblank disable - * timer. - * - * This can be set to true it the hardware has a working vblank counter - * with high-precision timestamping (otherwise there are races) and the - * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() - * appropriately. See also @max_vblank_count and - * &drm_crtc_funcs.get_vblank_counter. - */ - bool vblank_disable_immediate; - - /** - * @vblank: - * - * Array of vblank tracking structures, one per &struct drm_crtc. For - * historical reasons (vblank support predates kernel modesetting) this - * is free-standing and not part of &struct drm_crtc itself. It must be - * initialized explicitly by calling drm_vblank_init(). - */ - struct drm_vblank_crtc *vblank; - - spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ - spinlock_t vbl_lock; - - /** - * @max_vblank_count: - * - * Maximum value of the vblank registers. This value +1 will result in a - * wrap-around of the vblank register. It is used by the vblank core to - * handle wrap-arounds. - * - * If set to zero the vblank core will try to guess the elapsed vblanks - * between times when the vblank interrupt is disabled through - * high-precision timestamps. That approach is suffering from small - * races and imprecision over longer time periods, hence exposing a - * hardware vblank counter is always recommended. - * - * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set. 
- */ - u32 max_vblank_count; /**< size of vblank counter register */ - - /** - * List of events - */ - struct list_head vblank_event_list; - spinlock_t event_lock; - - /*@} */ - - struct drm_agp_head *agp; /**< AGP data */ - - struct pci_dev *pdev; /**< PCI device structure */ -#ifdef __alpha__ - struct pci_controller *hose; -#endif - - struct drm_sg_mem *sg; /**< Scatter gather memory */ - unsigned int num_crtcs; /**< Number of CRTCs on this device */ - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - - struct drm_mode_config mode_config; /**< Current mode config */ - - /** \name GEM information */ - /*@{ */ - struct mutex object_name_lock; - struct idr object_name_idr; - struct drm_vma_offset_manager *vma_offset_manager; - /*@} */ - int switch_power_state; -}; - -/** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device @@ -493,19 +320,6 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev, return ((dev->driver->driver_features & feature) ? 1 : 0); } -static inline void drm_device_set_unplugged(struct drm_device *dev) -{ - smp_wmb(); - atomic_set(&dev->unplugged, 1); -} - -static inline int drm_device_is_unplugged(struct drm_device *dev) -{ - int ret = atomic_read(&dev->unplugged); - smp_rmb(); - return ret; -} - /******************************************************************/ /** \name Internal function definitions */ /*@{*/ diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h new file mode 100644 index 000000000000..e21af87a2f3c --- /dev/null +++ b/include/drm/drm_device.h @@ -0,0 +1,190 @@ +#ifndef _DRM_DEVICE_H_ +#define _DRM_DEVICE_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/idr.h> + +#include <drm/drm_hashtab.h> +#include <drm/drm_mode_config.h> + +struct drm_driver; +struct drm_minor; +struct drm_master; +struct drm_device_dma; +struct drm_vblank_crtc; +struct drm_sg_mem; +struct drm_local_map; +struct drm_vma_offset_manager; + +struct inode; + +struct pci_dev; +struct pci_controller; + +/** + * DRM device structure. This structure represent a complete card that + * may contain multiple heads. + */ +struct drm_device { + struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ + int if_version; /**< Highest interface version set */ + + /** \name Lifetime Management */ + /*@{ */ + struct kref ref; /**< Object ref-count */ + struct device *dev; /**< Device structure of bus-device */ + struct drm_driver *driver; /**< DRM driver managing the device */ + void *dev_private; /**< DRM driver private data */ + struct drm_minor *control; /**< Control node */ + struct drm_minor *primary; /**< Primary node */ + struct drm_minor *render; /**< Render node */ + bool registered; + + /* currently active master for this device. Protected by master_mutex */ + struct drm_master *master; + + atomic_t unplugged; /**< Flag whether dev is dead */ + struct inode *anon_inode; /**< inode for private address-space */ + char *unique; /**< unique name of the device */ + /*@} */ + + /** \name Locks */ + /*@{ */ + struct mutex struct_mutex; /**< For others */ + struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ + /*@} */ + + /** \name Usage Counters */ + /*@{ */ + int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ + spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. 
*/ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ + + struct mutex filelist_mutex; + struct list_head filelist; + + /** \name Memory management */ + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + + /** \name Context handle management */ + /*@{ */ + struct list_head ctxlist; /**< Linked list of context handles */ + struct mutex ctxlist_mutex; /**< For ctxlist */ + + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ + + /*@} */ + + /** \name DMA support */ + /*@{ */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ + /*@} */ + + /** \name Context support */ + /*@{ */ + + __volatile__ long context_flag; /**< Context swapping flag */ + int last_context; /**< Last current context */ + /*@} */ + + /** + * @irq_enabled: + * + * Indicates that interrupt handling is enabled, specifically vblank + * handling. Drivers which don't use drm_irq_install() need to set this + * to true manually. + */ + bool irq_enabled; + int irq; + + /** + * @vblank_disable_immediate: + * + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * + * This can be set to true if the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. + */ + bool vblank_disable_immediate; + + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). + */ + struct drm_vblank_crtc *vblank; + + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ + spinlock_t vbl_lock; + + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach suffers from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+ */ + u32 max_vblank_count; /**< size of vblank counter register */ + + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + + /*@} */ + + struct drm_agp_head *agp; /**< AGP data */ + + struct pci_dev *pdev; /**< PCI device structure */ +#ifdef __alpha__ + struct pci_controller *hose; +#endif + + struct drm_sg_mem *sg; /**< Scatter gather memory */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + struct drm_mode_config mode_config; /**< Current mode config */ + + /** \name GEM information */ + /*@{ */ + struct mutex object_name_lock; + struct idr object_name_idr; + struct drm_vma_offset_manager *vma_offset_manager; + /*@} */ + int switch_power_state; +}; + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 505c91354802..71bbaaec836d 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -30,7 +30,8 @@ #include <linux/list.h> #include <linux/irqreturn.h> -struct drm_device; +#include <drm/drm_device.h> + struct drm_file; struct drm_gem_object; struct drm_master; @@ -613,7 +614,24 @@ void drm_dev_unregister(struct drm_device *dev); void drm_dev_ref(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); -void drm_unplug_dev(struct drm_device *dev); +void drm_dev_unplug(struct drm_device *dev); + +/** + * drm_dev_is_unplugged - is a DRM device unplugged + * @dev: DRM device + * + * This function can be called to check whether a hotpluggable device is unplugged. + * Unplugging itself is signalled through drm_dev_unplug(). If a device is + * unplugged, these two functions guarantee that any store before calling + * drm_dev_unplug() is visible to callers of this function after it completes. + */ +static inline int drm_dev_is_unplugged(struct drm_device *dev) +{ + int ret = atomic_read(&dev->unplugged); + smp_rmb(); + return ret; +} + int drm_dev_set_unique(struct drm_device *dev, const char *name); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index b42529e0fae0..58a739bf15f1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -73,11 +73,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args); -/* map memory region for DRM framebuffer to user space */ -int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, u32 handle, - u64 *offset); - /* set vm_flags and we can change the VM attribute to other one at here */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h index a6c387f91eff..d554ded60ee9 100644 --- a/include/drm/tinydrm/tinydrm-helpers.h +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -43,7 +43,8 @@ void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_clip_rect *clip, bool swap); -int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb); +void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); struct backlight_device *tinydrm_of_find_backlight(struct device *dev); int tinydrm_enable_backlight(struct backlight_device *backlight); diff --git a/include/linux/reservation.h
b/include/linux/reservation.h index 156cfd330b66..21fc84d82d41 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -254,6 +254,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, unsigned *pshared_count, struct dma_fence ***pshared); +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src); + long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout); diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index 551628e571f9..afae87004963 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -155,6 +155,16 @@ struct drm_vc4_submit_cl { __u32 pad:24; #define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) +/* By default, the kernel gets to choose the order that the tiles are + * rendered in. If this is set, then the tiles will be rendered in a + * raster order, with the right-to-left vs left-to-right and + * top-to-bottom vs bottom-to-top dictated by + * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping + * blits to be implemented using the 3D engine. + */ +#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1) +#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2) +#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3) __u32 flags; /* Returned value of the seqno of this render job (for the @@ -294,6 +304,7 @@ struct drm_vc4_get_hang_state { #define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3 #define DRM_VC4_PARAM_SUPPORTS_ETC1 4 #define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 +#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 struct drm_vc4_get_param { __u32 param;
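
As a rough usage note for the vc4 UAPI additions above, here is a minimal userspace sketch (not part of this series; the <drm/vc4_drm.h> include path and the helper name vc4_raster_order_flags() are assumptions for illustration). It queries DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER first and only requests a fixed raster order when the running kernel advertises it:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>        /* include path is an assumption; may be libdrm/vc4_drm.h */

/* Hypothetical helper: returns the submit_cl flags that request a fixed,
 * left-to-right/top-to-bottom raster order, or 0 if the running kernel
 * does not advertise DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER. */
static uint32_t vc4_raster_order_flags(int drm_fd)
{
        struct drm_vc4_get_param gp;

        memset(&gp, 0, sizeof(gp));
        gp.param = DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER;
        if (ioctl(drm_fd, DRM_IOCTL_VC4_GET_PARAM, &gp) != 0 || !gp.value)
                return 0;       /* older kernel: let it keep choosing the tile order */

        /* Clearing either INCREASING_* bit flips the walk direction, which is
         * what makes overlapping blits on the 3D engine possible. */
        return VC4_SUBMIT_CL_FIXED_RCL_ORDER |
               VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
               VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y;
}

A caller would then OR the returned value into drm_vc4_submit_cl.flags before issuing DRM_IOCTL_VC4_SUBMIT_CL; a return of 0 leaves the kernel free to pick the tile order, so the pre-existing submit path is unaffected.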