Diffstat (limited to 'drivers/gpu')
202 files changed, 10448 insertions, 3558 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 24c2d7caedd5..c3413b6adb17 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -167,6 +167,8 @@ config DRM_SAVAGE source "drivers/gpu/drm/exynos/Kconfig" +source "drivers/gpu/drm/rockchip/Kconfig" + source "drivers/gpu/drm/vmwgfx/Kconfig" source "drivers/gpu/drm/gma500/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 47d89869c5df..e620807418ea 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ +obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ @@ -49,6 +50,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ obj-$(CONFIG_DRM_VIA) +=via/ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ obj-$(CONFIG_DRM_EXYNOS) +=exynos/ +obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ @@ -66,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ -obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 102cd36799b1..fe5c543599b0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -31,7 +31,6 @@ #include <uapi/linux/kfd_ioctl.h> #include <linux/time.h> #include <linux/mm.h> -#include <linux/uaccess.h> #include <uapi/asm-generic/mman-common.h> #include <asm/processor.h> #include "kfd_priv.h" @@ -102,21 +101,28 @@ struct device *kfd_chardev(void) static int kfd_open(struct inode *inode, struct file *filep) { struct kfd_process *process; + bool is_32bit_user_mode; if (iminor(inode) != 0) return -ENODEV; + is_32bit_user_mode = is_compat_task(); + + if (is_32bit_user_mode == true) { + dev_warn(kfd_device, + "Process %d (32-bit) failed to open /dev/kfd\n" + "32-bit processes are not supported by amdkfd\n", + current->pid); + return -EPERM; + } + process = kfd_create_process(current); if (IS_ERR(process)) return PTR_ERR(process); - process->is_32bit_user_mode = is_compat_task(); - dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", process->pasid, process->is_32bit_user_mode); - kfd_init_apertures(process); - return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 924e90c072e5..f44d6737b65a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -320,6 +320,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) { int retval; struct mqd_manager *mqd; + bool prev_active = false; BUG_ON(!dqm || !q || !q->mqd); @@ -330,10 +331,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) return -ENOMEM; } - retval = mqd->update_mqd(mqd, q->mqd, &q->properties); if (q->properties.is_active == true) + prev_active = true; + + /* + * + * check active state vs. 
the previous state + * and modify counter accordingly + */ + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if ((q->properties.is_active == true) && (prev_active == false)) dqm->queue_count++; - else + else if ((q->properties.is_active == false) && (prev_active == true)) dqm->queue_count--; if (sched_policy != KFD_SCHED_POLICY_NO_HWS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 66df4da01c29..e64aa99e5e41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process) struct kfd_dev *dev; struct kfd_process_device *pdd; - mutex_lock(&process->mutex); - /*Iterating over all devices*/ while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL && id < NUM_OF_SUPPORTED_GPUS) { pdd = kfd_get_process_device_data(dev, process, 1); + if (!pdd) + return -1; /* * For 64 bit process aperture will be statically reserved in @@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process) id++; } - mutex_unlock(&process->mutex); - return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 9abac48de499..935071410724 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -221,8 +221,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq, queue_size_dwords; if (packet_size_in_dwords >= queue_size_dwords || - packet_size_in_dwords >= available_size) + packet_size_in_dwords >= available_size) { + /* + * make sure calling functions know + * acquire_packet_buffer() failed + */ + *buffer_ptr = NULL; return -ENOMEM; + } if (wptr + packet_size_in_dwords >= queue_size_dwords) { while (wptr > 0) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 2458ab7c0c6e..4c25ef504f79 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -32,8 +32,7 @@ int kfd_pasid_init(void) { pasid_limit = max_num_of_processes; - pasid_bitmap = kzalloc(DIV_ROUND_UP(pasid_limit, BITS_PER_BYTE), - GFP_KERNEL); + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); if (!pasid_bitmap) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index b4f49ac13334..3c76ef05cbcf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -26,6 +26,8 @@ #include <linux/slab.h> #include <linux/amd-iommu.h> #include <linux/notifier.h> +#include <linux/compat.h> + struct mm_struct; #include "kfd_priv.h" @@ -196,7 +198,7 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu) mmdrop(p->mm); work = (struct kfd_process_release_work *) - kmalloc(sizeof(struct kfd_process_release_work), GFP_KERNEL); + kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC); if (work) { INIT_WORK((struct work_struct *) work, kfd_process_wq_release); @@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread) if (err != 0) goto err_process_pqm_init; + /* init process apertures*/ + process->is_32bit_user_mode = is_compat_task(); + if (kfd_init_apertures(process) != 0) + goto err_init_apretures; + return process; +err_init_apretures: + pqm_uninit(&process->pqm); err_process_pqm_init: hash_del_rcu(&process->kfd_processes); synchronize_rcu(); diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5733e2859e8a..cca1708fd811 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.simd_per_cu); sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu", dev->node_props.max_slots_scratch_cu); - sysfs_show_32bit_prop(buffer, "engine_id", - dev->node_props.engine_id); sysfs_show_32bit_prop(buffer, "vendor_id", dev->node_props.vendor_id); sysfs_show_32bit_prop(buffer, "device_id", @@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kgd)); sysfs_show_64bit_prop(buffer, "local_mem_size", kfd2kgd->get_vmem_size(dev->gpu->kgd)); + + sysfs_show_32bit_prop(buffer, "fw_version", + kfd2kgd->get_fw_version( + dev->gpu->kgd, + KGD_ENGINE_MEC1)); + } ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", @@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void) uint32_t i = 0; list_for_each_entry(dev, &topology_device_list, list) { - ret = kfd_build_sysfs_node_entry(dev, 0); + ret = kfd_build_sysfs_node_entry(dev, i); if (ret < 0) return ret; i++; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 9c729dd8dd50..47b551970a14 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -45,6 +45,17 @@ enum kgd_memory_pool { KGD_POOL_FRAMEBUFFER = 3, }; +enum kgd_engine_type { + KGD_ENGINE_PFP = 1, + KGD_ENGINE_ME, + KGD_ENGINE_CE, + KGD_ENGINE_MEC1, + KGD_ENGINE_MEC2, + KGD_ENGINE_RLC, + KGD_ENGINE_SDMA, + KGD_ENGINE_MAX +}; + struct kgd2kfd_shared_resources { /* Bit n == 1 means VMID n is available for KFD. */ unsigned int compute_vmid_bitmap; @@ -137,6 +148,8 @@ struct kgd2kfd_calls { * * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot. * + * @get_fw_version: Returns FW versions from the header + * * This structure contains function pointers to services that the kgd driver * provides to amdkfd driver. 
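For context on how the new hook is meant to be consumed: below is a minimal, hypothetical kfd-side wrapper around get_fw_version(). Only the kfd2kgd function table, struct kgd_dev, and the KGD_ENGINE_MEC1 enumerator come from the interface change in this patch; the wrapper itself is an editor's sketch, not code from the commit (the kfd_topology.c hunk above performs the equivalent call inline).

	static uint16_t example_mec1_fw_version(const struct kfd2kgd_calls *kfd2kgd,
						struct kgd_dev *kgd)
	{
		/* Older kgd drivers may not populate the new hook yet. */
		if (!kfd2kgd->get_fw_version)
			return 0;

		/* MEC1 microcode version, as parsed from the firmware header. */
		return kfd2kgd->get_fw_version(kgd, KGD_ENGINE_MEC1);
	}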
* @@ -176,6 +189,8 @@ struct kfd2kgd_calls { int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id); + uint16_t (*get_fw_version)(struct kgd_dev *kgd, + enum kgd_engine_type type); }; bool kgd2kfd_init(unsigned interface_version, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 908e5316eac4..b01420c84864 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -486,7 +486,6 @@ static struct platform_driver armada_drm_platform_driver = { .remove = armada_drm_remove, .driver = { .name = "armada-drm", - .owner = THIS_MODULE, }, .id_table = armada_drm_platform_ids, }; diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 7496f55611a5..ef5feeecec84 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -226,7 +226,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, obj->dev_addr = DMA_ERROR_CODE; - mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping; + mapping = file_inode(obj->obj.filp)->i_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index d3b46746b611..ff5f034cc405 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -243,12 +243,6 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (state->plane_states[index]) return state->plane_states[index]; - /* - * TODO: We currently don't have per-plane mutexes. So instead of trying - * crazy tricks with deferring plane->crtc and hoping for the best just - * grab all crtc locks. Once we have per-plane locks we must update this - * to only take the plane mutex. - */ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); if (ret) return ERR_PTR(ret); @@ -350,7 +344,8 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state); /** * drm_atomic_set_crtc_for_plane - set crtc for plane - * @plane_state: atomic state object for the plane + * @state: the incoming atomic state + * @plane: the plane whose incoming state to update * @crtc: crtc to use for the plane * * Changing the assigned crtc for a plane requires us to grab the lock and state @@ -363,20 +358,35 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state); * sequence must be restarted. All other errors are fatal. 
*/ int -drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, - struct drm_crtc *crtc) +drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state, + struct drm_plane *plane, struct drm_crtc *crtc) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(state, plane); struct drm_crtc_state *crtc_state; + if (WARN_ON(IS_ERR(plane_state))) + return PTR_ERR(plane_state); + + if (plane_state->crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + plane_state->crtc); + if (WARN_ON(IS_ERR(crtc_state))) + return PTR_ERR(crtc_state); + + crtc_state->plane_mask &= ~(1 << drm_plane_index(plane)); + } + + plane_state->crtc = crtc; + if (crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); + crtc_state->plane_mask |= (1 << drm_plane_index(plane)); } - plane_state->crtc = crtc; - if (crtc) DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n", plane_state, crtc->base.id); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index a17b8e9c0a81..bbdbe4721573 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; if (plane->state->crtc) { - crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)]; + crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)]; if (WARN_ON(!crtc_state)) return; @@ -751,6 +751,33 @@ static void wait_for_fences(struct drm_device *dev, } } +static bool framebuffer_changed(struct drm_device *dev, + struct drm_atomic_state *old_state, + struct drm_crtc *crtc) +{ + struct drm_plane *plane; + struct drm_plane_state *old_plane_state; + int nplanes = old_state->dev->mode_config.num_total_plane; + int i; + + for (i = 0; i < nplanes; i++) { + plane = old_state->planes[i]; + old_plane_state = old_state->plane_states[i]; + + if (!plane) + continue; + + if (plane->state->crtc != crtc && + old_plane_state->crtc != crtc) + continue; + + if (plane->state->fb != old_plane_state->fb) + return true; + } + + return false; +} + /** * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs * @dev: DRM device @@ -758,7 +785,9 @@ static void wait_for_fences(struct drm_device *dev, * * Helper to, after atomic commit, wait for vblanks on all effected * crtcs (ie. before cleaning up old framebuffers using - * drm_atomic_helper_cleanup_planes()) + * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the + * framebuffers have actually changed to optimize for the legacy cursor and + * plane update use-case. 
*/ void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, @@ -784,6 +813,9 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, if (!crtc->state->enable) continue; + if (!framebuffer_changed(dev, old_state, crtc)) + continue; + ret = drm_crtc_vblank_get(crtc); if (ret != 0) continue; @@ -1014,6 +1046,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, for (i = 0; i < nplanes; i++) { struct drm_plane_helper_funcs *funcs; struct drm_plane *plane = old_state->planes[i]; + struct drm_plane_state *old_plane_state; if (!plane) continue; @@ -1023,7 +1056,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_update) continue; - funcs->atomic_update(plane); + old_plane_state = old_state->plane_states[i]; + + funcs->atomic_update(plane, old_plane_state); } for (i = 0; i < ncrtcs; i++) { @@ -1187,7 +1222,7 @@ retry: goto fail; } - ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); + ret = drm_atomic_set_crtc_for_plane(state, plane, crtc); if (ret != 0) goto fail; drm_atomic_set_fb_for_plane(plane_state, fb); @@ -1243,6 +1278,17 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane) struct drm_plane_state *plane_state; int ret = 0; + /* + * FIXME: Without plane->crtc set we can't get at the implicit legacy + * acquire context. The real fix will be to wire the acquire ctx through + * everywhere we need it, but meanwhile prevent chaos by just skipping + * this noop. The critical case is the cursor ioctls which a) only grab + * crtc/cursor-plane locks (so we need the crtc to get at the right + * acquire context) and b) can try to disable the plane multiple times. + */ + if (!plane->crtc) + return 0; + state = drm_atomic_state_alloc(plane->dev); if (!state) return -ENOMEM; @@ -1255,7 +1301,7 @@ retry: goto fail; } - ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + ret = drm_atomic_set_crtc_for_plane(state, plane, NULL); if (ret != 0) goto fail; drm_atomic_set_fb_for_plane(plane_state, NULL); @@ -1406,11 +1452,24 @@ retry: goto fail; } + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) { + ret = PTR_ERR(primary_state); + goto fail; + } + if (!set->mode) { WARN_ON(set->fb); WARN_ON(set->num_connectors); crtc_state->enable = false; + + ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL); + if (ret != 0) + goto fail; + + drm_atomic_set_fb_for_plane(primary_state, NULL); + goto commit; } @@ -1420,13 +1479,7 @@ retry: crtc_state->enable = true; drm_mode_copy(&crtc_state->mode, set->mode); - primary_state = drm_atomic_get_plane_state(state, crtc->primary); - if (IS_ERR(primary_state)) { - ret = PTR_ERR(primary_state); - goto fail; - } - - ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); + ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc); if (ret != 0) goto fail; drm_atomic_set_fb_for_plane(primary_state, set->fb); @@ -1698,7 +1751,7 @@ retry: goto fail; } - ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); + ret = drm_atomic_set_crtc_for_plane(state, plane, crtc); if (ret != 0) goto fail; drm_atomic_set_fb_for_plane(plane_state, fb); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 589a921d4313..5213da499d39 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -721,6 +721,12 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); dev->mode_config.num_crtc--; + + WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state); + if 
(crtc->state && crtc->funcs->atomic_destroy_state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + memset(crtc, 0, sizeof(*crtc)); } EXPORT_SYMBOL(drm_crtc_cleanup); @@ -904,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; + if (connector->tile_group) { + drm_mode_put_tile_group(dev, connector->tile_group); + connector->tile_group = NULL; + } + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) drm_mode_remove(connector, mode); @@ -918,6 +929,13 @@ void drm_connector_cleanup(struct drm_connector *connector) connector->name = NULL; list_del(&connector->head); dev->mode_config.num_connector--; + + WARN_ON(connector->state && !connector->funcs->atomic_destroy_state); + if (connector->state && connector->funcs->atomic_destroy_state) + connector->funcs->atomic_destroy_state(connector, + connector->state); + + memset(connector, 0, sizeof(*connector)); } EXPORT_SYMBOL(drm_connector_cleanup); @@ -1059,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge) list_del(&bridge->head); dev->mode_config.num_bridge--; drm_modeset_unlock_all(dev); + + memset(bridge, 0, sizeof(*bridge)); } EXPORT_SYMBOL(drm_bridge_cleanup); @@ -1125,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) drm_modeset_lock_all(dev); drm_mode_object_put(dev, &encoder->base); kfree(encoder->name); - encoder->name = NULL; list_del(&encoder->head); dev->mode_config.num_encoder--; drm_modeset_unlock_all(dev); + + memset(encoder, 0, sizeof(*encoder)); } EXPORT_SYMBOL(drm_encoder_cleanup); @@ -1244,6 +1265,12 @@ void drm_plane_cleanup(struct drm_plane *plane) if (plane->type == DRM_PLANE_TYPE_OVERLAY) dev->mode_config.num_overlay_plane--; drm_modeset_unlock_all(dev); + + WARN_ON(plane->state && !plane->funcs->atomic_destroy_state); + if (plane->state && plane->funcs->atomic_destroy_state) + plane->funcs->atomic_destroy_state(plane, plane->state); + + memset(plane, 0, sizeof(*plane)); } EXPORT_SYMBOL(drm_plane_cleanup); @@ -1326,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev) "PATH", 0); dev->mode_config.path_property = dev_path; + dev->mode_config.tile_property = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "TILE", 0); + return 0; } @@ -1955,6 +1987,15 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, return true; } +static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector) +{ + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. 
*/ + if (connector->state) + return connector->state->best_encoder; + return connector->encoder; +} + /** * drm_mode_getconnector - get connector configuration * @dev: drm device for the ioctl @@ -1973,6 +2014,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, { struct drm_mode_get_connector *out_resp = data; struct drm_connector *connector; + struct drm_encoder *encoder; struct drm_display_mode *mode; int mode_count = 0; int props_count = 0; @@ -2028,8 +2070,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->subpixel = connector->display_info.subpixel_order; out_resp->connection = connector->status; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - if (connector->encoder) - out_resp->encoder_id = connector->encoder->base.id; + + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; else out_resp->encoder_id = 0; drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -2099,6 +2143,33 @@ out: return ret; } +static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + bool uses_atomic = false; + + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->state) + continue; + + uses_atomic = true; + + if (connector->state->best_encoder != encoder) + continue; + + return connector->state->crtc; + } + + /* Don't return stale data (e.g. pending async disable). */ + if (uses_atomic) + return NULL; + + return encoder->crtc; +} + /** * drm_mode_getencoder - get encoder configuration * @dev: drm device for the ioctl @@ -2117,6 +2188,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, { struct drm_mode_get_encoder *enc_resp = data; struct drm_encoder *encoder; + struct drm_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; @@ -2126,7 +2198,10 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, return -ENOENT; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - if (encoder->crtc) + crtc = drm_encoder_get_crtc(encoder); + if (crtc) + enc_resp->crtc_id = crtc->base.id; + else if (encoder->crtc) enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; @@ -2392,7 +2467,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_set_plane *plane_req = data; - struct drm_mode_object *obj; struct drm_plane *plane; struct drm_crtc *crtc = NULL; struct drm_framebuffer *fb = NULL; @@ -2415,14 +2489,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data, * First, find the plane, crtc, and fb objects. If not available, * we don't bother to call the driver. 
*/ - obj = drm_mode_object_find(dev, plane_req->plane_id, - DRM_MODE_OBJECT_PLANE); - if (!obj) { + plane = drm_plane_find(dev, plane_req->plane_id); + if (!plane) { DRM_DEBUG_KMS("Unknown plane ID %d\n", plane_req->plane_id); return -ENOENT; } - plane = obj_to_plane(obj); if (plane_req->fb_id) { fb = drm_framebuffer_lookup(dev, plane_req->fb_id); @@ -2432,14 +2504,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data, return -ENOENT; } - obj = drm_mode_object_find(dev, plane_req->crtc_id, - DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(dev, plane_req->crtc_id); + if (!crtc) { DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); return -ENOENT; } - crtc = obj_to_crtc(obj); } /* @@ -3393,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv) /* * When the file gets released that means no one else can access the fb - * list any more, so no need to grab fpriv->fbs_lock. And we need to to + * list any more, so no need to grab fpriv->fbs_lock. And we need to * avoid upsetting lockdep since the universal cursor code adds a * framebuffer while holding mutex locks. * @@ -4032,6 +4102,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector, EXPORT_SYMBOL(drm_mode_connector_set_path_property); /** + * drm_mode_connector_set_tile_property - set tile property on connector + * @connector: connector to set property on. + * + * This looks up the tile information for a connector, and creates a + * property for userspace to parse if it exists. The property is of + * the form of 8 integers using ':' as a separator. + * + * Returns: + * Zero on success, errno on failure. + */ +int drm_mode_connector_set_tile_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + int ret, size; + char tile[256]; + + if (connector->tile_blob_ptr) + drm_property_destroy_blob(dev, connector->tile_blob_ptr); + + if (!connector->has_tile) { + connector->tile_blob_ptr = NULL; + ret = drm_object_property_set_value(&connector->base, + dev->mode_config.tile_property, 0); + return ret; + } + + snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d", + connector->tile_group->id, connector->tile_is_single_monitor, + connector->num_h_tile, connector->num_v_tile, + connector->tile_h_loc, connector->tile_v_loc, + connector->tile_h_size, connector->tile_v_size); + size = strlen(tile) + 1; + + connector->tile_blob_ptr = drm_property_create_blob(connector->dev, + size, tile); + if (!connector->tile_blob_ptr) + return -EINVAL; + + ret = drm_object_property_set_value(&connector->base, + dev->mode_config.tile_property, + connector->tile_blob_ptr->base.id); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_set_tile_property); + +/** * drm_mode_connector_update_edid_property - update the edid property of a connector * @connector: drm connector * @edid: new value of the edid property @@ -5101,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); idr_init(&dev->mode_config.crtc_idr); + idr_init(&dev->mode_config.tile_idr); drm_modeset_lock_all(dev); drm_mode_create_standard_connector_properties(dev); @@ -5188,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) crtc->funcs->destroy(crtc); } + idr_destroy(&dev->mode_config.tile_idr); idr_destroy(&dev->mode_config.crtc_idr); drm_modeset_lock_fini(&dev->mode_config.connection_mutex); } @@ -5210,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, 
supported_rotations); } EXPORT_SYMBOL(drm_mode_create_rotation_property); + +/** + * DOC: Tile group + * + * Tile groups are used to represent tiled monitors with a unique + * integer identifier. Tiled monitors using DisplayID v1.3 have + * a unique 8-byte handle, we store this in a tile group, so we + * have a common identifier for all tiles in a monitor group. + */ +static void drm_tile_group_free(struct kref *kref) +{ + struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount); + struct drm_device *dev = tg->dev; + mutex_lock(&dev->mode_config.idr_mutex); + idr_remove(&dev->mode_config.tile_idr, tg->id); + mutex_unlock(&dev->mode_config.idr_mutex); + kfree(tg); +} + +/** + * drm_mode_put_tile_group - drop a reference to a tile group. + * @dev: DRM device + * @tg: tile group to drop reference to. + * + * drop reference to tile group and free if 0. + */ +void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg) +{ + kref_put(&tg->refcount, drm_tile_group_free); +} + +/** + * drm_mode_get_tile_group - get a reference to an existing tile group + * @dev: DRM device + * @topology: 8-bytes unique per monitor. + * + * Use the unique bytes to get a reference to an existing tile group. + * + * RETURNS: + * tile group or NULL if not found. + */ +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + char topology[8]) +{ + struct drm_tile_group *tg; + int id; + mutex_lock(&dev->mode_config.idr_mutex); + idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) { + if (!memcmp(tg->group_data, topology, 8)) { + if (!kref_get_unless_zero(&tg->refcount)) + tg = NULL; + mutex_unlock(&dev->mode_config.idr_mutex); + return tg; + } + } + mutex_unlock(&dev->mode_config.idr_mutex); + return NULL; +} + +/** + * drm_mode_create_tile_group - create a tile group from a displayid description + * @dev: DRM device + * @topology: 8-bytes unique per monitor. + * + * Create a tile group for the unique monitor, and get a unique + * identifier for the tile group. + * + * RETURNS: + * new tile group or error. + */ +struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + char topology[8]) +{ + struct drm_tile_group *tg; + int ret; + + tg = kzalloc(sizeof(*tg), GFP_KERNEL); + if (!tg) + return ERR_PTR(-ENOMEM); + + kref_init(&tg->refcount); + memcpy(tg->group_data, topology, 8); + tg->dev = dev; + + mutex_lock(&dev->mode_config.idr_mutex); + ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL); + if (ret >= 0) { + tg->id = ret; + } else { + kfree(tg); + tg = ERR_PTR(ret); + } + + mutex_unlock(&dev->mode_config.idr_mutex); + return tg; +} diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 959e2074b0d4..79968e39c8d0 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -186,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, /* * The specification doesn't give any recommendation on how often to - * retry native transactions, so retry 7 times like for I2C-over-AUX - * transactions. + * retry native transactions. We used to retry 7 times like for + * aux i2c transactions but real world devices this wasn't + * sufficient, bump to 32 which makes Dell 4k monitors happier. 
*/ - for (retry = 0; retry < 7; retry++) { + for (retry = 0; retry < 32; retry++) { mutex_lock(&aux->hw_mutex); err = aux->transfer(aux, &msg); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 5682d7e9f1ec..9a5b68717ec8 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) { + struct drm_dp_mst_branch *mstb; + switch (old_pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: @@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) drm_dp_mst_unregister_i2c_bus(&port->aux); break; case DP_PEER_DEVICE_MST_BRANCHING: - drm_dp_put_mst_branch_device(port->mstb); + mstb = port->mstb; port->mstb = NULL; + drm_dp_put_mst_branch_device(mstb); break; } } @@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref) struct drm_dp_mst_topology_mgr *mgr = port->mgr; if (!port->input) { port->vcpi.num_slots = 0; + + kfree(port->cached_edid); if (port->connector) (*port->mgr->cbs->destroy_connector)(mgr, port->connector); drm_dp_port_teardown_pdt(port, port->pdt); @@ -1097,6 +1102,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, char proppath[255]; build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + + if (port->port_num >= 8) { + port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + } } /* put reference to this port */ @@ -2167,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); * This returns the current connection state for a port. It validates the * port pointer still exists so the caller doesn't require a reference */ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { enum drm_connector_status status = connector_status_disconnected; @@ -2186,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr case DP_PEER_DEVICE_SST_SINK: status = connector_status_connected; + /* for logical ports - cache the EDID */ + if (port->port_num >= 8 && !port->cached_edid) { + port->cached_edid = drm_get_edid(connector, &port->aux.ddc); + } break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) @@ -2217,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (!port) return NULL; - edid = drm_get_edid(connector, &port->aux.ddc); + if (port->cached_edid) + edid = drm_edid_duplicate(port->cached_edid); + else + edid = drm_get_edid(connector, &port->aux.ddc); + + drm_mode_connector_set_tile_property(connector); drm_dp_put_port(port); return edid; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 2e5c7d941313..4f41377b0b80 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -535,6 +535,8 @@ static void drm_fs_inode_free(struct inode *inode) * The initial ref-count of the object is 1. Use drm_dev_ref() and * drm_dev_unref() to take and drop further ref-counts. * + * Note that for purely virtual devices @parent can be NULL. + * * RETURNS: * Pointer to new DRM device, or NULL if out of memory. 
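As a sketch of what the drm_dev_alloc() note above permits — allocating a purely virtual DRM device with a NULL parent. The function name and driver argument are hypothetical, error handling is reduced to the essentials, and NULL-on-failure follows the kerneldoc above.

	static struct drm_device *example_virtual_device(struct drm_driver *drv)
	{
		struct drm_device *ddev;

		ddev = drm_dev_alloc(drv, NULL);	/* NULL parent: virtual device */
		if (!ddev)
			return NULL;			/* out of memory */

		if (drm_dev_register(ddev, 0)) {	/* expose to userspace */
			drm_dev_unref(ddev);
			return NULL;
		}

		return ddev;
	}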
*/ diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 1a77a49d2695..53bc7a628909 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -34,6 +34,7 @@ #include <linux/module.h> #include <drm/drmP.h> #include <drm/drm_edid.h> +#include <drm/drm_displayid.h> #define version_greater(edid, maj, min) \ (((edid)->version > (maj)) || \ @@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400); MODULE_PARM_DESC(edid_fixup, "Minimum number of valid EDID header bytes (0-8, default 6)"); +static void drm_get_displayid(struct drm_connector *connector, + struct edid *edid); + +static int drm_edid_block_checksum(const u8 *raw_edid) +{ + int i; + u8 csum = 0; + for (i = 0; i < EDID_LENGTH; i++) + csum += raw_edid[i]; + + return csum; +} + +static bool drm_edid_is_zero(const u8 *in_edid, int length) +{ + if (memchr_inv(in_edid, 0, length)) + return false; + + return true; +} + /** * drm_edid_block_valid - Sanity check the EDID block (base or extension) * @raw_edid: pointer to raw EDID block @@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup, */ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) { - int i; - u8 csum = 0; + u8 csum; struct edid *edid = (struct edid *)raw_edid; if (WARN_ON(!raw_edid)) @@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) } } - for (i = 0; i < EDID_LENGTH; i++) - csum += raw_edid[i]; + csum = drm_edid_block_checksum(raw_edid); if (csum) { if (print_bad_edid) { DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); @@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) bad: if (print_bad_edid) { - printk(KERN_ERR "Raw EDID:\n"); - print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, + if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) { + printk(KERN_ERR "EDID block is all zeroes\n"); + } else { + printk(KERN_ERR "Raw EDID:\n"); + print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, raw_edid, EDID_LENGTH, false); + } } return false; } @@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid); #define DDC_SEGMENT_ADDR 0x30 /** * drm_do_probe_ddc_edid() - get EDID information via I2C - * @adapter: I2C device adaptor + * @data: I2C device adapter * @buf: EDID data buffer to be filled * @block: 128 byte EDID block to start fetching from * @len: EDID data buffer length to fetch @@ -1176,14 +1200,6 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) return ret == xfers ? 0 : -1; } -static bool drm_edid_is_zero(u8 *in_edid, int length) -{ - if (memchr_inv(in_edid, 0, length)) - return false; - - return true; -} - /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1308,10 +1324,15 @@ EXPORT_SYMBOL(drm_probe_ddc); struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { + struct edid *edid; + if (!drm_probe_ddc(adapter)) return NULL; - return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); + edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter); + if (edid) + drm_get_displayid(connector, edid); + return edid; } EXPORT_SYMBOL(drm_get_edid); @@ -2406,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, /* * Search EDID for CEA extension block. 
*/ -static u8 *drm_find_cea_extension(struct edid *edid) +static u8 *drm_find_edid_extension(struct edid *edid, int ext_id) { u8 *edid_ext = NULL; int i; @@ -2418,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid) /* Find CEA extension */ for (i = 0; i < edid->extensions; i++) { edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); - if (edid_ext[0] == CEA_EXT) + if (edid_ext[0] == ext_id) break; } @@ -2428,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid) return edid_ext; } +static u8 *drm_find_cea_extension(struct edid *edid) +{ + return drm_find_edid_extension(edid, CEA_EXT); +} + +static u8 *drm_find_displayid_extension(struct edid *edid) +{ + return drm_find_edid_extension(edid, DISPLAYID_EXT); +} + /* * Calculate the alternate clock for the CEA mode * (60Hz vs. 59.94Hz etc.) @@ -3145,9 +3176,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) } } eld[5] |= sad_count << 4; - eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; - DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count); + eld[DRM_ELD_BASELINE_ELD_LEN] = + DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); + + DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", + drm_eld_size(eld), sad_count); } EXPORT_SYMBOL(drm_edid_to_eld); @@ -3885,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, return 0; } EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); + +static int drm_parse_display_id(struct drm_connector *connector, + u8 *displayid, int length, + bool is_edid_extension) +{ + /* if this is an EDID extension the first byte will be 0x70 */ + int idx = 0; + struct displayid_hdr *base; + struct displayid_block *block; + u8 csum = 0; + int i; + + if (is_edid_extension) + idx = 1; + + base = (struct displayid_hdr *)&displayid[idx]; + + DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", + base->rev, base->bytes, base->prod_id, base->ext_count); + + if (base->bytes + 5 > length - idx) + return -EINVAL; + + for (i = idx; i <= base->bytes + 5; i++) { + csum += displayid[i]; + } + if (csum) { + DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum); + return -EINVAL; + } + + block = (struct displayid_block *)&displayid[idx + 4]; + DRM_DEBUG_KMS("block id %d, rev %d, len %d\n", + block->tag, block->rev, block->num_bytes); + + switch (block->tag) { + case DATA_BLOCK_TILED_DISPLAY: { + struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; + + u16 w, h; + u8 tile_v_loc, tile_h_loc; + u8 num_v_tile, num_h_tile; + struct drm_tile_group *tg; + + w = tile->tile_size[0] | tile->tile_size[1] << 8; + h = tile->tile_size[2] | tile->tile_size[3] << 8; + + num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30); + num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30); + tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4); + tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4); + + connector->has_tile = true; + if (tile->tile_cap & 0x80) + connector->tile_is_single_monitor = true; + + connector->num_h_tile = num_h_tile + 1; + connector->num_v_tile = num_v_tile + 1; + connector->tile_h_loc = tile_h_loc; + connector->tile_v_loc = tile_v_loc; + connector->tile_h_size = w + 1; + connector->tile_v_size = h + 1; + + DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap); + DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1); + DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n", + num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc); + DRM_DEBUG_KMS("vend 
%c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]); + + tg = drm_mode_get_tile_group(connector->dev, tile->topology_id); + if (!tg) { + tg = drm_mode_create_tile_group(connector->dev, tile->topology_id); + } + if (!tg) + return -ENOMEM; + + if (connector->tile_group != tg) { + /* if we haven't got a pointer, + take the reference, drop ref to old tile group */ + if (connector->tile_group) { + drm_mode_put_tile_group(connector->dev, connector->tile_group); + } + connector->tile_group = tg; + } else + /* if same tile group, then release the ref we just took. */ + drm_mode_put_tile_group(connector->dev, tg); + } + break; + default: + printk("unknown displayid tag %d\n", block->tag); + break; + } + return 0; +} + +static void drm_get_displayid(struct drm_connector *connector, + struct edid *edid) +{ + void *displayid = NULL; + int ret; + connector->has_tile = false; + displayid = drm_find_displayid_extension(edid); + if (!displayid) { + /* drop reference to any tile group we had */ + goto out_drop_ref; + } + + ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true); + if (ret < 0) + goto out_drop_ref; + if (!connector->has_tile) + goto out_drop_ref; + return; +out_drop_ref: + if (connector->tile_group) { + drm_mode_put_tile_group(connector->dev, connector->tile_group); + connector->tile_group = NULL; + } + return; +} diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 09d47e9ba026..52ce26d6b4fb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; bool ret; + bool do_delayed = false; + drm_modeset_lock_all(dev); ret = restore_fbdev_mode(fb_helper); + + do_delayed = fb_helper->delayed_hotplug; + if (do_delayed) + fb_helper->delayed_hotplug = false; drm_modeset_unlock_all(dev); + + if (do_delayed) + drm_fb_helper_hotplug_event(fb_helper); return ret; } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); @@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info) drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); - if (fb_helper->delayed_hotplug) { - fb_helper->delayed_hotplug = false; - drm_fb_helper_hotplug_event(fb_helper); - } return 0; } EXPORT_SYMBOL(drm_fb_helper_set_par); @@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode *desired_mode; + int x, y; desired_mode = fb_helper->crtc_info[i].desired_mode; - + x = fb_helper->crtc_info[i].x; + y = fb_helper->crtc_info[i].y; if (desired_mode) { if (gamma_size == 0) gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; - if (desired_mode->hdisplay < sizes.fb_width) - sizes.fb_width = desired_mode->hdisplay; - if (desired_mode->vdisplay < sizes.fb_height) - sizes.fb_height = desired_mode->vdisplay; - if (desired_mode->hdisplay > sizes.surface_width) - sizes.surface_width = desired_mode->hdisplay; - if (desired_mode->vdisplay > sizes.surface_height) - sizes.surface_height = desired_mode->vdisplay; + if (desired_mode->hdisplay + x < sizes.fb_width) + sizes.fb_width = desired_mode->hdisplay + x; + if (desired_mode->vdisplay + y < sizes.fb_height) + sizes.fb_height = desired_mode->vdisplay + y; + if (desired_mode->hdisplay + x > sizes.surface_width) + sizes.surface_width = desired_mode->hdisplay + x; + if (desired_mode->vdisplay + y > sizes.surface_height) 
+ sizes.surface_height = desired_mode->vdisplay + y; crtc_count++; } } @@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper, static bool drm_target_cloned(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, + struct drm_fb_offset *offsets, bool *enabled, int width, int height) { int count, i, j; @@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, return false; } +static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + int idx, + int h_idx, int v_idx) +{ + struct drm_fb_helper_connector *fb_helper_conn; + int i; + int hoffset = 0, voffset = 0; + + for (i = 0; i < fb_helper->connector_count; i++) { + fb_helper_conn = fb_helper->connector_info[i]; + if (!fb_helper_conn->connector->has_tile) + continue; + + if (!modes[i] && (h_idx || v_idx)) { + DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i, + fb_helper_conn->connector->base.id); + continue; + } + if (fb_helper_conn->connector->tile_h_loc < h_idx) + hoffset += modes[i]->hdisplay; + + if (fb_helper_conn->connector->tile_v_loc < v_idx) + voffset += modes[i]->vdisplay; + } + offsets[idx].x = hoffset; + offsets[idx].y = voffset; + DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx); + return 0; +} + static bool drm_target_preferred(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, + struct drm_fb_offset *offsets, bool *enabled, int width, int height) { struct drm_fb_helper_connector *fb_helper_conn; int i; - + uint64_t conn_configured = 0, mask; + int tile_pass = 0; + mask = (1 << fb_helper->connector_count) - 1; +retry: for (i = 0; i < fb_helper->connector_count; i++) { fb_helper_conn = fb_helper->connector_info[i]; - if (enabled[i] == false) + if (conn_configured & (1 << i)) continue; + if (enabled[i] == false) { + conn_configured |= (1 << i); + continue; + } + + /* first pass over all the untiled connectors */ + if (tile_pass == 0 && fb_helper_conn->connector->has_tile) + continue; + + if (tile_pass == 1) { + if (fb_helper_conn->connector->tile_h_loc != 0 || + fb_helper_conn->connector->tile_v_loc != 0) + continue; + + } else { + if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 && + fb_helper_conn->connector->tile_v_loc != tile_pass - 1) + /* if this tile_pass doesn't cover any of the tiles - keep going */ + continue; + + /* find the tile offsets for this pass - need + to find all tiles left and above */ + drm_get_tile_offsets(fb_helper, modes, offsets, + i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc); + } DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", fb_helper_conn->connector->base.id); /* got for command line mode first */ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { - DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", - fb_helper_conn->connector->base.id); + DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n", + fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0); modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); } /* No preferred modes, pick one off the list */ @@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper, } DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : "none"); + conn_configured |= (1 << i); + } + + if ((conn_configured & mask) != mask) { + tile_pass++; + goto retry; } return true; } @@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_crtc **crtcs; struct drm_display_mode **modes; + struct drm_fb_offset *offsets; struct drm_mode_set *modeset; bool *enabled; int width, height; @@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); modes = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_display_mode *), GFP_KERNEL); + offsets = kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_fb_offset), GFP_KERNEL); enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL); - if (!crtcs || !modes || !enabled) { + if (!crtcs || !modes || !enabled || !offsets) { DRM_ERROR("Memory allocation failed\n"); goto out; } @@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) if (!(fb_helper->funcs->initial_config && fb_helper->funcs->initial_config(fb_helper, crtcs, modes, + offsets, enabled, width, height))) { memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0])); memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0])); + memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0])); - if (!drm_target_cloned(fb_helper, - modes, enabled, width, height) && - !drm_target_preferred(fb_helper, - modes, enabled, width, height)) + if (!drm_target_cloned(fb_helper, modes, offsets, + enabled, width, height) && + !drm_target_preferred(fb_helper, modes, offsets, + enabled, width, height)) DRM_ERROR("Unable to find initial modes\n"); DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", @@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) for (i = 0; i < fb_helper->connector_count; i++) { struct drm_display_mode *mode = modes[i]; struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; + struct drm_fb_offset *offset = &offsets[i]; modeset = &fb_crtc->mode_set; if (mode && fb_crtc) { - DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", - mode->name, fb_crtc->mode_set.crtc->base.id); + DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", + mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); fb_crtc->desired_mode = mode; + fb_crtc->x = offset->x; + fb_crtc->y = offset->y; if (modeset->mode) drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, fb_crtc->desired_mode); modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; modeset->fb = fb_helper->fb; + modeset->x = offset->x; + modeset->y = offset->y; } } @@ -1578,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) out: kfree(crtcs); kfree(modes); + kfree(offsets); kfree(enabled); } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 91e1105f2800..0b9514b6cd64 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -527,6 +527,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, if (copy_to_user(buffer + total, e->event, e->event->length)) { total = -EFAULT; + e->destroy(e); break; } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0e47df4ef24e..4d79dad9d44f 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) 
spin_lock_irqsave(&dev->vblank_time_lock, irqflags); /* - * If the vblank interrupt was already disbled update the count + * If the vblank interrupt was already disabled update the count * and timestamp to maintain the appearance that the counter * has been ticking all along until this time. This makes the * count account for the entire time between drm_vblank_on() and @@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, * vblank events since the system was booted, including lost events due to * modesetting activity. * + * This is the legacy version of drm_crtc_vblank_count(). + * * Returns: * The software vblank counter. */ @@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc) EXPORT_SYMBOL(drm_vblank_count); /** + * drm_crtc_vblank_count - retrieve "cooked" vblank counter value + * @crtc: which counter to retrieve + * + * Fetches the "cooked" vblank count value that represents the number of + * vblank events since the system was booted, including lost events due to + * modesetting activity. + * + * This is the native KMS version of drm_vblank_count(). + * + * Returns: + * The software vblank counter. + */ +u32 drm_crtc_vblank_count(struct drm_crtc *crtc) +{ + return drm_vblank_count(crtc->dev, drm_crtc_index(crtc)); +} +EXPORT_SYMBOL(drm_crtc_vblank_count); + +/** * drm_vblank_count_and_time - retrieve "cooked" vblank counter value * and the system timestamp corresponding to that vblank counter value. * @@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev, * * Updates sequence # and timestamp on event, and sends it to userspace. * Caller must hold event lock. + * + * This is the legacy version of drm_crtc_send_vblank_event(). */ void drm_send_vblank_event(struct drm_device *dev, int crtc, struct drm_pending_vblank_event *e) @@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc, EXPORT_SYMBOL(drm_send_vblank_event); /** + * drm_crtc_send_vblank_event - helper to send vblank event after pageflip + * @crtc: the source CRTC of the vblank event + * @e: the event to send + * + * Updates sequence # and timestamp on event, and sends it to userspace. + * Caller must hold event lock. + * + * This is the native KMS version of drm_send_vblank_event(). + */ +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) +{ + drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e); +} +EXPORT_SYMBOL(drm_crtc_send_vblank_event); + +/** * drm_vblank_enable - enable the vblank interrupt on a CRTC * @dev: DRM device * @crtc: CRTC in question @@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. + * + * This is the legacy version of drm_crtc_handle_vblank(). */ bool drm_handle_vblank(struct drm_device *dev, int crtc) { @@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) return true; } EXPORT_SYMBOL(drm_handle_vblank); + +/** + * drm_crtc_handle_vblank - handle a vblank event + * @crtc: where this event occurred + * + * Drivers should call this routine in their vblank interrupt handlers to + * update the vblank counter and send any signals that may be pending. + * + * This is the native KMS version of drm_handle_vblank(). + * + * Returns: + * True if the event was successfully handled, false on failure. 
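A usage sketch for the native-KMS helper pair added in this hunk: a hypothetical driver interrupt path that updates the cooked counter via drm_crtc_handle_vblank() and delivers a queued page-flip event with drm_crtc_send_vblank_event() under dev->event_lock, as the kerneldoc requires. The function and its event argument are invented for illustration only.

	static void example_vblank_irq(struct drm_crtc *crtc,
				       struct drm_pending_vblank_event *event)
	{
		unsigned long flags;

		/* Bump the cooked count and satisfy any vblank waiters. */
		drm_crtc_handle_vblank(crtc);

		if (event) {
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			/* Stamps the event with count/time and sends it. */
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		}
	}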
+ */ +bool drm_crtc_handle_vblank(struct drm_crtc *crtc) +{ + return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc)); +} +EXPORT_SYMBOL(drm_crtc_handle_vblank); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 93c6533c25da..18a1ac6ac22f 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -443,7 +443,7 @@ int drm_plane_helper_commit(struct drm_plane *plane, crtc_funcs[i]->atomic_begin(crtc[i]); } - plane_funcs->atomic_update(plane); + plane_funcs->atomic_update(plane, plane_state); for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d71fb54582d2..121470a83d1a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -618,7 +618,6 @@ static struct platform_driver exynos_drm_platform_driver = { .probe = exynos_drm_platform_probe, .remove = exynos_drm_platform_remove, .driver = { - .owner = THIS_MODULE, .name = "exynos-drm", .pm = &exynos_drm_pm_ops, }, @@ -646,6 +645,18 @@ static int exynos_drm_init(void) if (!is_exynos) return -ENODEV; + /* + * Register device object only in case of Exynos SoC. + * + * Below codes resolves temporarily infinite loop issue incurred + * by Exynos drm driver when using multi-platform kernel. + * So these codes will be replaced with more generic way later. + */ + if (!of_machine_is_compatible("samsung,exynos3") && + !of_machine_is_compatible("samsung,exynos4") && + !of_machine_is_compatible("samsung,exynos5")) + return -ENODEV; + exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, NULL, 0); if (IS_ERR(exynos_drm_pdev)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 68d38eb6774d..835b6af00970 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1817,7 +1817,7 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 6ff8599f6cbf..81a250830808 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1540,7 +1540,7 @@ static int g2d_resume(struct device *dev) } #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int g2d_runtime_suspend(struct device *dev) { struct g2d_data *g2d = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index c6a013fc321c..0261468c8019 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1764,7 +1764,7 @@ static int gsc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int gsc_runtime_suspend(struct device *dev) { struct gsc_context *ctx = get_gsc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index b6a37d4f5b13..425e70625388 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -822,7 +822,7 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff 
--git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index b15315576376..190e55f2f891 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -39,6 +39,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ oaktrail_crtc.o \ oaktrail_lvds.o \ + oaktrail_lvds_i2c.o \ oaktrail_hdmi.o \ oaktrail_hdmi_i2c.o diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 0d39da6e8b7a..83bbc271bcfb 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -359,22 +359,26 @@ void oaktrail_lvds_init(struct drm_device *dev, * if closed, act like it's not there for now */ + edid = NULL; mutex_lock(&dev->mode_config.mutex); i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus); - if (i2c_adap == NULL) - dev_err(dev->dev, "No ddc adapter available!\n"); + if (i2c_adap) + edid = drm_get_edid(connector, i2c_adap); + if (edid == NULL && dev_priv->lpc_gpio_base) { + oaktrail_lvds_i2c_init(encoder); + if (gma_encoder->ddc_bus != NULL) { + i2c_adap = &gma_encoder->ddc_bus->adapter; + edid = drm_get_edid(connector, i2c_adap); + } + } /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - if (i2c_adap) { - edid = drm_get_edid(connector, i2c_adap); - if (edid) { - drm_mode_connector_update_edid_property(connector, - edid); - drm_add_edid_modes(connector, edid); - kfree(edid); - } + if (edid) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + kfree(edid); list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { @@ -383,7 +387,8 @@ void oaktrail_lvds_init(struct drm_device *dev, goto out; /* FIXME: check for quirks */ } } - } + } else + dev_err(dev->dev, "No ddc adapter available!\n"); /* * If we didn't get EDID, try geting panel timing * from configuration data @@ -411,8 +416,10 @@ failed_find: mutex_unlock(&dev->mode_config.mutex); dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); - if (gma_encoder->ddc_bus) + if (gma_encoder->ddc_bus) { psb_intel_i2c_destroy(gma_encoder->ddc_bus); + gma_encoder->ddc_bus = NULL; + } /* failed_ddc: */ diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c new file mode 100644 index 000000000000..f913a62eee5f --- /dev/null +++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2002-2010, Intel Corporation. + * Copyright (c) 2014 ATRON electronic GmbH + * Author: Jan Safrata <jan.nikitenko@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/delay.h> + +#include <drm/drmP.h> +#include "psb_drv.h" +#include "psb_intel_reg.h" + + +/* + * LPC GPIO based I2C bus for LVDS of Atom E6xx + */ + +/*----------------------------------------------------------------------------- + * LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are part + * Atom E6xx [D31:F0] + ----------------------------------------------------------------------------*/ +#define RGEN 0x20 +#define RGIO 0x24 +#define RGLVL 0x28 +#define RGTPE 0x2C +#define RGTNE 0x30 +#define RGGPE 0x34 +#define RGSMI 0x38 +#define RGTS 0x3C + +/* The LVDS GPIO clock lines are GPIOSUS[3] + * The LVDS GPIO data lines are GPIOSUS[4] + */ +#define GPIO_CLOCK 0x08 +#define GPIO_DATA 0x10 + +#define LPC_READ_REG(chan, r) inl((chan)->reg + (r)) +#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r)) + +static int get_clock(void *data) +{ + struct psb_intel_i2c_chan *chan = data; + u32 val, tmp; + + val = LPC_READ_REG(chan, RGIO); + val |= GPIO_CLOCK; + LPC_WRITE_REG(chan, RGIO, val); + tmp = LPC_READ_REG(chan, RGLVL); + val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0; + + return val; +} + +static int get_data(void *data) +{ + struct psb_intel_i2c_chan *chan = data; + u32 val, tmp; + + val = LPC_READ_REG(chan, RGIO); + val |= GPIO_DATA; + LPC_WRITE_REG(chan, RGIO, val); + tmp = LPC_READ_REG(chan, RGLVL); + val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 
1 : 0; + + return val; +} + +static void set_clock(void *data, int state_high) +{ + struct psb_intel_i2c_chan *chan = data; + u32 val; + + if (state_high) { + val = LPC_READ_REG(chan, RGIO); + val |= GPIO_CLOCK; + LPC_WRITE_REG(chan, RGIO, val); + } else { + val = LPC_READ_REG(chan, RGIO); + val &= ~GPIO_CLOCK; + LPC_WRITE_REG(chan, RGIO, val); + val = LPC_READ_REG(chan, RGLVL); + val &= ~GPIO_CLOCK; + LPC_WRITE_REG(chan, RGLVL, val); + } +} + +static void set_data(void *data, int state_high) +{ + struct psb_intel_i2c_chan *chan = data; + u32 val; + + if (state_high) { + val = LPC_READ_REG(chan, RGIO); + val |= GPIO_DATA; + LPC_WRITE_REG(chan, RGIO, val); + } else { + val = LPC_READ_REG(chan, RGIO); + val &= ~GPIO_DATA; + LPC_WRITE_REG(chan, RGIO, val); + val = LPC_READ_REG(chan, RGLVL); + val &= ~GPIO_DATA; + LPC_WRITE_REG(chan, RGLVL, val); + } +} + +void oaktrail_lvds_i2c_init(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct gma_encoder *gma_encoder = to_gma_encoder(encoder); + struct drm_psb_private *dev_priv = dev->dev_private; + struct psb_intel_i2c_chan *chan; + + chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL); + if (!chan) + return; + + chan->drm_dev = dev; + chan->reg = dev_priv->lpc_gpio_base; + strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1); + chan->adapter.owner = THIS_MODULE; + chan->adapter.algo_data = &chan->algo; + chan->adapter.dev.parent = &dev->pdev->dev; + chan->algo.setsda = set_data; + chan->algo.setscl = set_clock; + chan->algo.getsda = get_data; + chan->algo.getscl = get_clock; + chan->algo.udelay = 100; + chan->algo.timeout = usecs_to_jiffies(2200); + chan->algo.data = chan; + + i2c_set_adapdata(&chan->adapter, chan); + + set_data(chan, 1); + set_clock(chan, 1); + udelay(50); + + if (i2c_bit_add_bus(&chan->adapter)) { + kfree(chan); + return; + } + + gma_encoder->ddc_bus = chan; +} diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 6ec3a905fdd2..92e7e5795398 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -212,6 +212,8 @@ static int psb_driver_unload(struct drm_device *dev) } if (dev_priv->aux_pdev) pci_dev_put(dev_priv->aux_pdev); + if (dev_priv->lpc_pdev) + pci_dev_put(dev_priv->lpc_pdev); /* Destroy VBT data */ psb_intel_destroy_bios(dev); @@ -280,6 +282,24 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) DRM_DEBUG_KMS("Couldn't find aux pci device"); } dev_priv->gmbus_reg = dev_priv->aux_reg; + + dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0)); + if (dev_priv->lpc_pdev) { + pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA, + &dev_priv->lpc_gpio_base); + pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA, + (u32)dev_priv->lpc_gpio_base | (1L<<31)); + pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA, + &dev_priv->lpc_gpio_base); + dev_priv->lpc_gpio_base &= 0xffc0; + if (dev_priv->lpc_gpio_base) + DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n", + dev_priv->lpc_gpio_base); + else { + pci_dev_put(dev_priv->lpc_pdev); + dev_priv->lpc_pdev = NULL; + } + } } else { dev_priv->gmbus_reg = dev_priv->vdc_reg; } diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 55ebe2bd88dd..e38057b91865 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -83,6 +83,7 @@ enum { #define PSB_PGETBL_CTL 0x2020 #define _PSB_PGETBL_ENABLED 0x00000001 #define PSB_SGX_2D_SLAVE_PORT 0x4000 +#define PSB_LPC_GBA 0x44 /* TODO: To get rid of */ 
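A condensed sketch of how the bit-banged adapter registered by oaktrail_lvds_i2c_init() ends up being consumed; it mirrors the oaktrail_lvds.c hunk above, with the wrapper function hypothetical and error handling trimmed:

	static void oaktrail_lvds_ddc_sketch(struct drm_encoder *encoder,
					     struct drm_connector *connector)
	{
		struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
		struct edid *edid = NULL;

		oaktrail_lvds_i2c_init(encoder);
		if (gma_encoder->ddc_bus)
			/* DDC transfers now bit-bang the LPC GPIOSUS[3]/[4] lines */
			edid = drm_get_edid(connector,
					    &gma_encoder->ddc_bus->adapter);

		if (edid) {
			drm_mode_connector_update_edid_property(connector, edid);
			drm_add_edid_modes(connector, edid);
			kfree(edid);
		}
	}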
#define PSB_TT_PRIV0_LIMIT (256*1024*1024) @@ -441,6 +442,7 @@ struct psb_ops; struct drm_psb_private { struct drm_device *dev; struct pci_dev *aux_pdev; /* Currently only used by mrst */ + struct pci_dev *lpc_pdev; /* Currently only used by mrst */ const struct psb_ops *ops; const struct psb_offset *regmap; @@ -470,6 +472,7 @@ struct drm_psb_private { uint8_t __iomem *sgx_reg; uint8_t __iomem *vdc_reg; uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */ + uint16_t lpc_gpio_base; uint32_t gatt_free_offset; /* Fencing / irq */ diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 336bd3aa1a06..860dd2177ca1 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -223,6 +223,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev, extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev); extern void oaktrail_dsi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); +extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder); extern void mid_dsi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int dsi_num); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 891e584e97ea..e4083e41a600 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -51,6 +51,7 @@ i915-y += intel_audio.o \ intel_frontbuffer.o \ intel_modes.o \ intel_overlay.o \ + intel_psr.o \ intel_sideband.o \ intel_sprite.o i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 809bb957b452..22c992a78ac6 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -413,6 +413,8 @@ static const u32 gen7_render_regs[] = { REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), OACONTROL, /* Only allowed for LRI and SRM. See below. */ + REG64(MI_PREDICATE_SRC0), + REG64(MI_PREDICATE_SRC1), GEN7_3DPRIM_END_OFFSET, GEN7_3DPRIM_START_VERTEX, GEN7_3DPRIM_VERTEX_COUNT, @@ -1072,6 +1074,8 @@ int i915_cmd_parser_get_version(void) * * 1. Initial version. Checks batches and reports violations, but leaves * hardware parsing enabled (so does not allow new use cases). + * 2. Allow access to the MI_PREDICATE_SRC0 and + * MI_PREDICATE_SRC1 registers. */ - return 1; + return 2; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 319da61354b0..779a275eb1fd 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1240,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 rpmodectl1, rcctl1; + u32 rpmodectl1, rcctl1, pw_status; unsigned fw_rendercount = 0, fw_mediacount = 0; intel_runtime_pm_get(dev_priv); + pw_status = I915_READ(VLV_GTLC_PW_STATUS); rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); @@ -1263,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m) yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))); seq_printf(m, "Render Power Well: %s\n", - (I915_READ(VLV_GTLC_PW_STATUS) & - VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); + (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); seq_printf(m, "Media Power Well: %s\n", - (I915_READ(VLV_GTLC_PW_STATUS) & - VLV_GTLC_PW_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); + (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); seq_printf(m, "Render RC6 residency since boot: %u\n", I915_READ(VLV_GT_RENDER_RC6)); @@ -1773,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused) return 0; } +static void i915_dump_lrc_obj(struct seq_file *m, + struct intel_engine_cs *ring, + struct drm_i915_gem_object *ctx_obj) +{ + struct page *page; + uint32_t *reg_state; + int j; + unsigned long ggtt_offset = 0; + + if (ctx_obj == NULL) { + seq_printf(m, "Context on %s with no gem object\n", + ring->name); + return; + } + + seq_printf(m, "CONTEXT: %s %u\n", ring->name, + intel_execlists_ctx_id(ctx_obj)); + + if (!i915_gem_obj_ggtt_bound(ctx_obj)) + seq_puts(m, "\tNot bound in GGTT\n"); + else + ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); + + if (i915_gem_object_get_pages(ctx_obj)) { + seq_puts(m, "\tFailed to get pages for context object\n"); + return; + } + + page = i915_gem_object_get_page(ctx_obj, 1); + if (!WARN_ON(page == NULL)) { + reg_state = kmap_atomic(page); + + for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { + seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", + ggtt_offset + 4096 + (j * 4), + reg_state[j], reg_state[j + 1], + reg_state[j + 2], reg_state[j + 3]); + } + kunmap_atomic(reg_state); + } + + seq_putc(m, '\n'); +} + static int i915_dump_lrc(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1793,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) list_for_each_entry(ctx, &dev_priv->context_list, link) { for_each_ring(ring, dev_priv, i) { - struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - - if (ring->default_context == ctx) - continue; - - if (ctx_obj) { - struct page *page = i915_gem_object_get_page(ctx_obj, 1); - uint32_t *reg_state = kmap_atomic(page); - int j; - - seq_printf(m, "CONTEXT: %s %u\n", ring->name, - intel_execlists_ctx_id(ctx_obj)); - - for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { - seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", - i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4), - reg_state[j], reg_state[j + 1], - reg_state[j + 2], reg_state[j + 3]); - } - kunmap_atomic(reg_state); - - seq_putc(m, '\n'); - } + if (ring->default_context != ctx) + i915_dump_lrc_obj(m, ring, + ctx->engine[i].state); } } @@ -1975,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (IS_GEN3(dev) || IS_GEN4(dev)) { seq_printf(m, "DDC = 0x%08x\n", I915_READ(DCC)); + seq_printf(m, "DDC2 = 0x%08x\n", + I915_READ(DCC2)); seq_printf(m, "C0DRB3 = 0x%04x\n", I915_READ16(C0DRB3)); seq_printf(m, "C1DRB3 = 0x%04x\n", @@ -1997,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data) seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", I915_READ(DISP_ARB_CTL)); } + + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + seq_puts(m, "L-shaped memory detected\n"); + intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -3309,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (pipe_crc->source && source) return -EINVAL; + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { + DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); + return -EIO; + } + if (IS_GEN2(dev)) ret = i8xx_pipe_crc_ctl_reg(&source, &val); else if (INTEL_INFO(dev)->gen < 5) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9a7353302b3f..ecee3bcc8772 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ 
b/drivers/gpu/drm/i915/i915_dma.c @@ -50,884 +50,6 @@ #include <linux/pm_runtime.h> #include <linux/oom.h> -#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) - -#define BEGIN_LP_RING(n) \ - intel_ring_begin(LP_RING(dev_priv), (n)) - -#define OUT_RING(x) \ - intel_ring_emit(LP_RING(dev_priv), x) - -#define ADVANCE_LP_RING() \ - __intel_ring_advance(LP_RING(dev_priv)) - -/** - * Lock test for when it's just for synchronization of ring access. - * - * In that case, we don't need to do it when GEM is initialized as nobody else - * has access to the ring. - */ -#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ - if (LP_RING(dev->dev_private)->buffer->obj == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file); \ -} while (0) - -static inline u32 -intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) -{ - if (I915_NEED_GFX_HWS(dev_priv->dev)) - return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg); - else - return intel_read_status_page(LP_RING(dev_priv), reg); -} - -#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg) -#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) -#define I915_BREADCRUMB_INDEX 0x21 - -void i915_update_dri1_breadcrumb(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv; - - /* - * The dri breadcrumb update races against the drm master disappearing. - * Instead of trying to fix this (this is by far not the only ums issue) - * just don't do the update in kms mode. - */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - if (dev->primary->master) { - master_priv = dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = - READ_BREADCRUMB(dev_priv); - } -} - -static void i915_write_hws_pga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 addr; - - addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(dev)->gen >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; - I915_WRITE(HWS_PGA, addr); -} - -/** - * Frees the hardware status page, whether it's a physical address or a virtual - * address set up by the X Server. 
- */ -static void i915_free_hws(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = LP_RING(dev_priv); - - if (dev_priv->status_page_dmah) { - drm_pci_free(dev, dev_priv->status_page_dmah); - dev_priv->status_page_dmah = NULL; - } - - if (ring->status_page.gfx_addr) { - ring->status_page.gfx_addr = 0; - iounmap(dev_priv->dri1.gfx_hws_cpu_addr); - } - - /* Need to rewrite hardware status page */ - I915_WRITE(HWS_PGA, 0x1ffff000); -} - -void i915_kernel_lost_context(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv; - struct intel_engine_cs *ring = LP_RING(dev_priv); - struct intel_ringbuffer *ringbuf = ring->buffer; - - /* - * We should never lose context on the ring with modesetting - * as we don't expose it to userspace - */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR; - ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE); - if (ringbuf->space < 0) - ringbuf->space += ringbuf->size; - - if (!dev->primary->master) - return; - - master_priv = dev->primary->master->driver_priv; - if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; -} - -static int i915_dma_cleanup(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - /* Make sure interrupts are disabled here because the uninstall ioctl - * may not have been called from userspace and after dev_private - * is freed, it's too late. - */ - if (dev->irq_enabled) - drm_irq_uninstall(dev); - - mutex_lock(&dev->struct_mutex); - for (i = 0; i < I915_NUM_RINGS; i++) - intel_cleanup_ring_buffer(&dev_priv->ring[i]); - mutex_unlock(&dev->struct_mutex); - - /* Clear the HWS virtual address at teardown */ - if (I915_NEED_GFX_HWS(dev)) - i915_free_hws(dev); - - return 0; -} - -static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - int ret; - - master_priv->sarea = drm_legacy_getsarea(dev); - if (master_priv->sarea) { - master_priv->sarea_priv = (drm_i915_sarea_t *) - ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); - } else { - DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); - } - - if (init->ring_size != 0) { - if (LP_RING(dev_priv)->buffer->obj != NULL) { - i915_dma_cleanup(dev); - DRM_ERROR("Client tried to initialize ringbuffer in " - "GEM mode\n"); - return -EINVAL; - } - - ret = intel_render_ring_init_dri(dev, - init->ring_start, - init->ring_size); - if (ret) { - i915_dma_cleanup(dev); - return ret; - } - } - - dev_priv->dri1.cpp = init->cpp; - dev_priv->dri1.back_offset = init->back_offset; - dev_priv->dri1.front_offset = init->front_offset; - dev_priv->dri1.current_page = 0; - if (master_priv->sarea_priv) - master_priv->sarea_priv->pf_current_page = 0; - - /* Allow hardware batchbuffers unless told otherwise. 
- */ - dev_priv->dri1.allow_batchbuffer = 1; - - return 0; -} - -static int i915_dma_resume(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = LP_RING(dev_priv); - - DRM_DEBUG_DRIVER("%s\n", __func__); - - if (ring->buffer->virtual_start == NULL) { - DRM_ERROR("can not ioremap virtual address for" - " ring buffer\n"); - return -ENOMEM; - } - - /* Program Hardware Status Page */ - if (!ring->status_page.page_addr) { - DRM_ERROR("Can not find hardware status page\n"); - return -EINVAL; - } - DRM_DEBUG_DRIVER("hw status page @ %p\n", - ring->status_page.page_addr); - if (ring->status_page.gfx_addr != 0) - intel_ring_setup_status_page(ring); - else - i915_write_hws_pga(dev); - - DRM_DEBUG_DRIVER("Enabled hardware status page\n"); - - return 0; -} - -static int i915_dma_init(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_i915_init_t *init = data; - int retcode = 0; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - switch (init->func) { - case I915_INIT_DMA: - retcode = i915_initialize(dev, init); - break; - case I915_CLEANUP_DMA: - retcode = i915_dma_cleanup(dev); - break; - case I915_RESUME_DMA: - retcode = i915_dma_resume(dev); - break; - default: - retcode = -EINVAL; - break; - } - - return retcode; -} - -/* Implement basically the same security restrictions as hardware does - * for MI_BATCH_NON_SECURE. These can be made stricter at any time. - * - * Most of the calculations below involve calculating the size of a - * particular instruction. It's important to get the size right as - * that tells us where the next instruction to check is. Any illegal - * instruction detected will be given a size of zero, which is a - * signal to abort the rest of the buffer. 
- */ -static int validate_cmd(int cmd) -{ - switch (((cmd >> 29) & 0x7)) { - case 0x0: - switch ((cmd >> 23) & 0x3f) { - case 0x0: - return 1; /* MI_NOOP */ - case 0x4: - return 1; /* MI_FLUSH */ - default: - return 0; /* disallow everything else */ - } - break; - case 0x1: - return 0; /* reserved */ - case 0x2: - return (cmd & 0xff) + 2; /* 2d commands */ - case 0x3: - if (((cmd >> 24) & 0x1f) <= 0x18) - return 1; - - switch ((cmd >> 24) & 0x1f) { - case 0x1c: - return 1; - case 0x1d: - switch ((cmd >> 16) & 0xff) { - case 0x3: - return (cmd & 0x1f) + 2; - case 0x4: - return (cmd & 0xf) + 2; - default: - return (cmd & 0xffff) + 2; - } - case 0x1e: - if (cmd & (1 << 23)) - return (cmd & 0xffff) + 1; - else - return 1; - case 0x1f: - if ((cmd & (1 << 23)) == 0) /* inline vertices */ - return (cmd & 0x1ffff) + 2; - else if (cmd & (1 << 17)) /* indirect random */ - if ((cmd & 0xffff) == 0) - return 0; /* unknown length, too hard */ - else - return (((cmd & 0xffff) + 1) / 2) + 1; - else - return 2; /* indirect sequential */ - default: - return 0; - } - default: - return 0; - } - - return 0; -} - -static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i, ret; - - if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8) - return -EINVAL; - - for (i = 0; i < dwords;) { - int sz = validate_cmd(buffer[i]); - - if (sz == 0 || i + sz > dwords) - return -EINVAL; - i += sz; - } - - ret = BEGIN_LP_RING((dwords+1)&~1); - if (ret) - return ret; - - for (i = 0; i < dwords; i++) - OUT_RING(buffer[i]); - if (dwords & 1) - OUT_RING(0); - - ADVANCE_LP_RING(); - - return 0; -} - -int -i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *box, - int DR1, int DR4) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - if (box->y2 <= box->y1 || box->x2 <= box->x1 || - box->y2 <= 0 || box->x2 <= 0) { - DRM_ERROR("Bad box %d,%d..%d,%d\n", - box->x1, box->y1, box->x2, box->y2); - return -EINVAL; - } - - if (INTEL_INFO(dev)->gen >= 4) { - ret = BEGIN_LP_RING(4); - if (ret) - return ret; - - OUT_RING(GFX_OP_DRAWRECT_INFO_I965); - OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); - OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); - OUT_RING(DR4); - } else { - ret = BEGIN_LP_RING(6); - if (ret) - return ret; - - OUT_RING(GFX_OP_DRAWRECT_INFO); - OUT_RING(DR1); - OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); - OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); - OUT_RING(DR4); - OUT_RING(0); - } - ADVANCE_LP_RING(); - - return 0; -} - -/* XXX: Emitting the counter should really be moved to part of the IRQ - * emit. 
For now, do it in both places: - */ - -static void i915_emit_breadcrumb(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 0; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; - - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); - OUT_RING(0); - ADVANCE_LP_RING(); - } -} - -static int i915_dispatch_cmdbuffer(struct drm_device *dev, - drm_i915_cmdbuffer_t *cmd, - struct drm_clip_rect *cliprects, - void *cmdbuf) -{ - int nbox = cmd->num_cliprects; - int i = 0, count, ret; - - if (cmd->sz & 0x3) { - DRM_ERROR("alignment"); - return -EINVAL; - } - - i915_kernel_lost_context(dev); - - count = nbox ? nbox : 1; - - for (i = 0; i < count; i++) { - if (i < nbox) { - ret = i915_emit_box(dev, &cliprects[i], - cmd->DR1, cmd->DR4); - if (ret) - return ret; - } - - ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); - if (ret) - return ret; - } - - i915_emit_breadcrumb(dev); - return 0; -} - -static int i915_dispatch_batchbuffer(struct drm_device *dev, - drm_i915_batchbuffer_t *batch, - struct drm_clip_rect *cliprects) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int nbox = batch->num_cliprects; - int i, count, ret; - - if ((batch->start | batch->used) & 0x7) { - DRM_ERROR("alignment"); - return -EINVAL; - } - - i915_kernel_lost_context(dev); - - count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { - if (i < nbox) { - ret = i915_emit_box(dev, &cliprects[i], - batch->DR1, batch->DR4); - if (ret) - return ret; - } - - if (!IS_I830(dev) && !IS_845G(dev)) { - ret = BEGIN_LP_RING(2); - if (ret) - return ret; - - if (INTEL_INFO(dev)->gen >= 4) { - OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); - OUT_RING(batch->start); - } else { - OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); - OUT_RING(batch->start | MI_BATCH_NON_SECURE); - } - } else { - ret = BEGIN_LP_RING(4); - if (ret) - return ret; - - OUT_RING(MI_BATCH_BUFFER); - OUT_RING(batch->start | MI_BATCH_NON_SECURE); - OUT_RING(batch->start + batch->used - 4); - OUT_RING(0); - } - ADVANCE_LP_RING(); - } - - - if (IS_G4X(dev) || IS_GEN5(dev)) { - if (BEGIN_LP_RING(2) == 0) { - OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - } - } - - i915_emit_breadcrumb(dev); - return 0; -} - -static int i915_dispatch_flip(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = - dev->primary->master->driver_priv; - int ret; - - if (!master_priv->sarea_priv) - return -EINVAL; - - DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", - __func__, - dev_priv->dri1.current_page, - master_priv->sarea_priv->pf_current_page); - - i915_kernel_lost_context(dev); - - ret = BEGIN_LP_RING(10); - if (ret) - return ret; - - OUT_RING(MI_FLUSH | MI_READ_FLUSH); - OUT_RING(0); - - OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); - OUT_RING(0); - if (dev_priv->dri1.current_page == 0) { - OUT_RING(dev_priv->dri1.back_offset); - dev_priv->dri1.current_page = 1; - } else { - OUT_RING(dev_priv->dri1.front_offset); - dev_priv->dri1.current_page = 0; - } - OUT_RING(0); - - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); - OUT_RING(0); - - ADVANCE_LP_RING(); 
- - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; - - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); - OUT_RING(0); - ADVANCE_LP_RING(); - } - - master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; - return 0; -} - -static int i915_quiescent(struct drm_device *dev) -{ - i915_kernel_lost_context(dev); - return intel_ring_idle(LP_RING(dev->dev_private)); -} - -static int i915_flush_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - mutex_lock(&dev->struct_mutex); - ret = i915_quiescent(dev); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -static int i915_batchbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv; - drm_i915_sarea_t *sarea_priv; - drm_i915_batchbuffer_t *batch = data; - int ret; - struct drm_clip_rect *cliprects = NULL; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - master_priv = dev->primary->master->driver_priv; - sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; - - if (!dev_priv->dri1.allow_batchbuffer) { - DRM_ERROR("Batchbuffer ioctl disabled\n"); - return -EINVAL; - } - - DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", - batch->start, batch->used, batch->num_cliprects); - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - if (batch->num_cliprects < 0) - return -EINVAL; - - if (batch->num_cliprects) { - cliprects = kcalloc(batch->num_cliprects, - sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) - return -ENOMEM; - - ret = copy_from_user(cliprects, batch->cliprects, - batch->num_cliprects * - sizeof(struct drm_clip_rect)); - if (ret != 0) { - ret = -EFAULT; - goto fail_free; - } - } - - mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_batchbuffer(dev, batch, cliprects); - mutex_unlock(&dev->struct_mutex); - - if (sarea_priv) - sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - -fail_free: - kfree(cliprects); - - return ret; -} - -static int i915_cmdbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv; - drm_i915_sarea_t *sarea_priv; - drm_i915_cmdbuffer_t *cmdbuf = data; - struct drm_clip_rect *cliprects = NULL; - void *batch_data; - int ret; - - DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", - cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - master_priv = dev->primary->master->driver_priv; - sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - if (cmdbuf->num_cliprects < 0) - return -EINVAL; - - batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); - if (batch_data == NULL) - return -ENOMEM; - - ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); - if (ret != 0) { - ret = -EFAULT; - goto fail_batch_free; - } - - if (cmdbuf->num_cliprects) { - cliprects = kcalloc(cmdbuf->num_cliprects, - sizeof(*cliprects), GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto fail_batch_free; - } - - ret = copy_from_user(cliprects, cmdbuf->cliprects, - cmdbuf->num_cliprects * - 
sizeof(struct drm_clip_rect)); - if (ret != 0) { - ret = -EFAULT; - goto fail_clip_free; - } - } - - mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); - mutex_unlock(&dev->struct_mutex); - if (ret) { - DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); - goto fail_clip_free; - } - - if (sarea_priv) - sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - -fail_clip_free: - kfree(cliprects); -fail_batch_free: - kfree(batch_data); - - return ret; -} - -static int i915_emit_irq(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - - i915_kernel_lost_context(dev); - - DRM_DEBUG_DRIVER("\n"); - - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 1; - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; - - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); - } - - return dev_priv->dri1.counter; -} - -static int i915_wait_irq(struct drm_device *dev, int irq_nr) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - int ret = 0; - struct intel_engine_cs *ring = LP_RING(dev_priv); - - DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, - READ_BREADCRUMB(dev_priv)); - - if (READ_BREADCRUMB(dev_priv) >= irq_nr) { - if (master_priv->sarea_priv) - master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - return 0; - } - - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - - if (ring->irq_get(ring)) { - DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ, - READ_BREADCRUMB(dev_priv) >= irq_nr); - ring->irq_put(ring); - } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) - ret = -EBUSY; - - if (ret == -EBUSY) { - DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", - READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); - } - - return ret; -} - -/* Needs the lock as it touches the ring. - */ -static int i915_irq_emit(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_irq_emit_t *emit = data; - int result; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - mutex_lock(&dev->struct_mutex); - result = i915_emit_irq(dev); - mutex_unlock(&dev->struct_mutex); - - if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { - DRM_ERROR("copy_to_user\n"); - return -EFAULT; - } - - return 0; -} - -/* Doesn't need the hardware lock. 
- */ -static int i915_irq_wait(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_irq_wait_t *irqwait = data; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - return i915_wait_irq(dev, irqwait->irq_seq); -} - -static int i915_vblank_pipe_get(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_vblank_pipe_t *pipe = data; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - - return 0; -} - -/** - * Schedule buffer swap at given vertical blank. - */ -static int i915_vblank_swap(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - /* The delayed swap mechanism was fundamentally racy, and has been - * removed. The model was that the client requested a delayed flip/swap - * from the kernel, then waited for vblank before continuing to perform - * rendering. The problem was that the kernel might wake the client - * up before it dispatched the vblank swap (since the lock has to be - * held while touching the ringbuffer), in which case the client would - * clear and start the next frame before the swap occurred, and - * flicker would occur in addition to likely missing the vblank. - * - * In the absence of this ioctl, userland falls back to a correct path - * of waiting for a vblank, then dispatching the swap on its own. - * Context switching to userland and back is plenty fast enough for - * meeting the requirements of vblank swapping. - */ - return -EINVAL; -} - -static int i915_flip_bufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - DRM_DEBUG_DRIVER("%s\n", __func__); - - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - - mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_flip(dev); - mutex_unlock(&dev->struct_mutex); - - return ret; -} static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data, drm_i915_getparam_t *param = data; int value; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - switch (param->param) { case I915_PARAM_IRQ_ACTIVE: - value = dev->pdev->irq ? 1 : 0; - break; case I915_PARAM_ALLOW_BATCHBUFFER: - value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; - break; case I915_PARAM_LAST_DISPATCH: - value = READ_BREADCRUMB(dev_priv); - break; + /* Reject all old ums/dri params. 
*/ + return -ENODEV; case I915_PARAM_CHIPSET_ID: value = dev->pdev->device; break; @@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(); break; + case I915_PARAM_HAS_COHERENT_PHYS_GTT: + value = 1; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; @@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_setparam_t *param = data; - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: - break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: - break; case I915_SETPARAM_ALLOW_BATCHBUFFER: - dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; - break; + /* Reject all old ums/dri params. */ + return -ENODEV; + case I915_SETPARAM_NUM_USED_FENCES: if (param->value > dev_priv->num_fence_regs || param->value < 0) @@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data, return 0; } -static int i915_set_status_page(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_hws_addr_t *hws = data; - struct intel_engine_cs *ring; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - if (!I915_NEED_GFX_HWS(dev)) - return -EINVAL; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - WARN(1, "tried to set status page when mode setting active\n"); - return 0; - } - - DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); - - ring = LP_RING(dev_priv); - ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); - - dev_priv->dri1.gfx_hws_cpu_addr = - ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096); - if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { - i915_dma_cleanup(dev); - ring->status_page.gfx_addr = 0; - DRM_ERROR("can not ioremap virtual address for" - " G33 hw status page\n"); - return -ENOMEM; - } - - memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); - I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - - DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", - ring->status_page.gfx_addr); - DRM_DEBUG_DRIVER("load hws at %p\n", - ring->status_page.page_addr); - return 0; -} - static int i915_get_bridge_dev(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1398,30 +460,6 @@ out: return ret; } -int i915_master_create(struct drm_device *dev, struct drm_master *master) -{ - struct drm_i915_master_private *master_priv; - - master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); - if (!master_priv) - return -ENOMEM; - - master->driver_priv = master_priv; - return 0; -} - -void i915_master_destroy(struct drm_device *dev, struct drm_master *master) -{ - struct drm_i915_master_private *master_priv = master->driver_priv; - - if (!master_priv) - return; - - kfree(master_priv); - - master->driver_priv = NULL; -} - #if IS_ENABLED(CONFIG_FB) static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { @@ -1663,15 +701,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_regs; if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_kick_out_vgacon(dev_priv); + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls 
over. */ + ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); goto out_gtt; } - ret = i915_kick_out_firmware_fb(dev_priv); + ret = i915_kick_out_vgacon(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + DRM_ERROR("failed to remove conflicting VGA console\n"); goto out_gtt; } } @@ -1775,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) DRM_ERROR("failed to init modeset\n"); goto out_power_well; } - } else { - /* Start out suspended in ums mode. */ - dev_priv->ums.mm_suspended = 1; } i915_setup_sysfs(dev); @@ -1894,9 +931,6 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); i915_gem_cleanup_stolen(dev); - - if (!I915_NEED_GFX_HWS(dev)) - i915_free_hws(dev); } intel_teardown_gmbus(dev); @@ -1946,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ void i915_driver_lastclose(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - - /* On gen6+ we refuse to init without kms enabled, but then the drm core - * goes right around and calls lastclose. Check for this and don't clean - * up anything. */ - if (!dev_priv) - return; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_fbdev_restore_mode(dev); - vga_switcheroo_process_delayed_switch(); - return; - } - - i915_gem_lastclose(dev); - - i915_dma_cleanup(dev); + intel_fbdev_restore_mode(dev); + vga_switcheroo_process_delayed_switch(); } void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) @@ -1986,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) } const struct drm_ioctl_desc i915_ioctls[] = { - DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), @@ -2012,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c743908b0a7e..574057cd1d09 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -584,6 +584,8 @@ static int i915_drm_suspend(struct drm_device *dev) return error; } + intel_suspend_gt_powersave(dev); + /* * Disable CRTCs directly since we want to preserve sw state * for _thaw. Also, power gate the CRTC power wells. @@ -595,15 +597,11 @@ static int i915_drm_suspend(struct drm_device *dev) intel_dp_mst_suspend(dev); - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); intel_suspend_encoders(dev_priv); - intel_suspend_gt_powersave(dev); - intel_suspend_hw(dev); } @@ -703,18 +701,17 @@ static int i915_drm_resume(struct drm_device *dev) intel_modeset_init_hw(dev); - { - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irq(&dev_priv->irq_lock); - } + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irq(&dev_priv->irq_lock); - intel_dp_mst_resume(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); + intel_dp_mst_resume(dev); + /* * ... but also need to make sure that hotplug processing * doesn't cause havoc. Like in the driver load code we don't @@ -814,6 +811,8 @@ int i915_reset(struct drm_device *dev) if (!i915.reset) return 0; + intel_reset_gt_powersave(dev); + mutex_lock(&dev->struct_mutex); i915_gem_reset(dev); @@ -856,10 +855,7 @@ int i915_reset(struct drm_device *dev) * was running at the time of the reset (i.e. we weren't VT * switched away). 
*/ - if (drm_core_check_feature(dev, DRIVER_MODESET) || - !dev_priv->ums.mm_suspended) { - dev_priv->ums.mm_suspended = 0; - + if (drm_core_check_feature(dev, DRIVER_MODESET)) { /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ dev_priv->gpu_error.reload_in_reset = true; @@ -886,7 +882,7 @@ int i915_reset(struct drm_device *dev) * of re-init after reset. */ if (INTEL_INFO(dev)->gen > 5) - intel_reset_gt_powersave(dev); + intel_enable_gt_powersave(dev); } else { mutex_unlock(&dev->struct_mutex); } @@ -1395,9 +1391,8 @@ static int intel_runtime_suspend(struct device *device) i915_gem_release_all_mmaps(dev_priv); mutex_unlock(&dev->struct_mutex); - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_disable_interrupts(dev_priv); intel_suspend_gt_powersave(dev); + intel_runtime_pm_disable_interrupts(dev_priv); ret = intel_suspend_complete(dev_priv); if (ret) { @@ -1578,8 +1573,6 @@ static struct drm_driver driver = { .resume = i915_resume_legacy, .device_is_agp = i915_driver_device_is_agp, - .master_create = i915_master_create, - .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = i915_debugfs_init, .debugfs_cleanup = i915_debugfs_cleanup, @@ -1593,7 +1586,7 @@ static struct drm_driver driver = { .gem_prime_import = i915_gem_prime_import, .dumb_create = i915_gem_dumb_create, - .dumb_map_offset = i915_gem_dumb_map_offset, + .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = drm_gem_dumb_destroy, .ioctls = i915_ioctls, .fops = &i915_driver_fops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4ba1aca071da..e9f891c432f8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,7 +55,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20141107" +#define DRIVER_DATE "20141121" #undef WARN_ON #define WARN_ON(x) WARN(x, "WARN_ON(" #x ")") @@ -213,10 +213,15 @@ enum intel_dpll_id { /* real shared dpll ids must be >= 0 */ DPLL_ID_PCH_PLL_A = 0, DPLL_ID_PCH_PLL_B = 1, + /* hsw/bdw */ DPLL_ID_WRPLL1 = 0, DPLL_ID_WRPLL2 = 1, + /* skl */ + DPLL_ID_SKL_DPLL1 = 0, + DPLL_ID_SKL_DPLL2 = 1, + DPLL_ID_SKL_DPLL3 = 2, }; -#define I915_NUM_PLLS 2 +#define I915_NUM_PLLS 3 struct intel_dpll_hw_state { /* i9xx, pch plls */ @@ -227,6 +232,17 @@ struct intel_dpll_hw_state { /* hsw, bdw */ uint32_t wrpll; + + /* skl */ + /* + * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in + * lower part of crtl1 and they get shifted into position when writing + * the register. This allows us to easily compare the state to share + * the DPLL. 
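+ *
+ * Illustrative only (these two lines are not part of the patch): with
+ * six bits per DPLL id, folding the saved value into DPLL_CTRL1
+ * amounts to roughly
+ *
+ *	val &= ~(0x3f << (id * 6));
+ *	val |= hw_state->ctrl1 << (id * 6);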
+ */ + uint32_t ctrl1; + /* HDMI only, 0 when used for DP */ + uint32_t cfgcr1, cfgcr2; }; struct intel_shared_dpll_config { @@ -256,6 +272,11 @@ struct intel_shared_dpll { struct intel_dpll_hw_state *hw_state); }; +#define SKL_DPLL0 0 +#define SKL_DPLL1 1 +#define SKL_DPLL2 2 +#define SKL_DPLL3 3 + /* Used by dp and fdi links */ struct intel_link_m_n { uint32_t tu; @@ -306,12 +327,6 @@ struct intel_opregion { struct intel_overlay; struct intel_overlay_error_state; -struct drm_local_map; - -struct drm_i915_master_private { - struct drm_local_map *sarea; - struct _drm_i915_sarea *sarea_priv; -}; #define I915_FENCE_REG_NONE -1 #define I915_MAX_NUM_FENCES 32 /* 32 fences + sign bit for FENCE_REG_NONE */ @@ -510,7 +525,7 @@ struct drm_i915_display_funcs { /* display clock increase/decrease */ /* pll clock increase/decrease */ - int (*setup_backlight)(struct intel_connector *connector); + int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe); uint32_t (*get_backlight)(struct intel_connector *connector); void (*set_backlight)(struct intel_connector *connector, uint32_t level); @@ -664,6 +679,7 @@ struct intel_context { struct { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; + int unpin_count; } engine[I915_NUM_RINGS]; struct list_head link; @@ -748,6 +764,7 @@ enum intel_sbi_destination { #define QUIRK_INVERT_BRIGHTNESS (1<<2) #define QUIRK_BACKLIGHT_PRESENT (1<<3) #define QUIRK_PIPEB_FORCE (1<<4) +#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) struct intel_fbdev; struct intel_fbc_work; @@ -799,7 +816,6 @@ struct i915_suspend_saved_registers { u32 saveBLC_HIST_CTL; u32 saveBLC_PWM_CTL; u32 saveBLC_PWM_CTL2; - u32 saveBLC_HIST_CTL_B; u32 saveBLC_CPU_PWM_CTL; u32 saveBLC_CPU_PWM_CTL2; u32 saveFPB0; @@ -908,6 +924,7 @@ struct i915_suspend_saved_registers { u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; + u16 saveGCDGMBUS; }; struct vlv_s0ix_state { @@ -978,8 +995,12 @@ struct intel_rps_ei { }; struct intel_gen6_power_mgmt { - /* work and pm_iir are protected by dev_priv->irq_lock */ + /* + * work, interrupts_enabled and pm_iir are protected by + * dev_priv->irq_lock + */ struct work_struct work; + bool interrupts_enabled; u32 pm_iir; /* Frequencies are stored in potentially platform dependent multiples. @@ -1102,31 +1123,6 @@ struct i915_power_domains { struct i915_power_well *power_wells; }; -struct i915_dri1_state { - unsigned allow_batchbuffer : 1; - u32 __iomem *gfx_hws_cpu_addr; - - unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; - - uint32_t counter; -}; - -struct i915_ums_state { - /** - * Flag if the X Server, and thus DRM, is not currently in - * control of the device. - * - * This is set between LeaveVT and EnterVT. It needs to be - * replaced with a semaphore. It also needs to be - * transitioned away from for kernel modesetting. - */ - int mm_suspended; -}; - #define MAX_L3_SLICES 2 struct intel_l3_parity { u32 *remap_info[MAX_L3_SLICES]; @@ -1760,14 +1756,6 @@ struct drm_i915_private { */ struct workqueue_struct *dp_wq; - uint32_t bios_vgacntr; - - /* Old dri1 support infrastructure, beware the dragons ya fools entering - * here! */ - struct i915_dri1_state dri1; - /* Old ums support infrastructure, same warning applies. 
*/ - struct i915_ums_state ums; - /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, @@ -1957,10 +1945,10 @@ struct drm_i915_gem_object { unsigned long user_pin_count; struct drm_file *pin_filp; - /** for phy allocated objects */ - struct drm_dma_handle *phys_handle; - union { + /** for phy allocated objects */ + struct drm_dma_handle *phys_handle; + struct i915_gem_userptr { uintptr_t ptr; unsigned read_only :1; @@ -2326,8 +2314,6 @@ struct i915_params { extern struct i915_params i915 __read_mostly; /* i915_dma.c */ -void i915_update_dri1_breadcrumb(struct drm_device *dev); -extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); @@ -2341,9 +2327,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *box, - int DR1, int DR4); extern int intel_gpu_reset(struct drm_device *dev); extern int i915_reset(struct drm_device *dev); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2395,8 +2378,6 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, ibx_display_interrupt_update((dev_priv), (bits), 0) /* i915_gem.c */ -int i915_gem_init_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_pread_ioctl(struct drm_device *dev, void *data, @@ -2443,10 +2424,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, @@ -2489,7 +2466,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); -void i915_gem_lastclose(struct drm_device *dev); int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, int *needs_clflush); @@ -2523,9 +2499,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int i915_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); +int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle, uint64_t *offset); /** * Returns true if seq1 is later than seq2. 
*/ @@ -2956,8 +2931,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); void assert_force_wake_inactive(struct drm_i915_private *dev_priv); -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); /* intel_sideband.c */ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); @@ -3055,6 +3030,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); } +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + static inline unsigned long timespec_to_jiffies_timeout(const struct timespec *value) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 50b842231c26..c11603b4cf1d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) } int -i915_gem_init_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_init *args = data; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - - if (args->gtt_start >= args->gtt_end || - (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) - return -EINVAL; - - /* GEM with user mode setting was never supported on ilk and later. 
*/ - if (INTEL_INFO(dev)->gen >= 5) - return -ENODEV; - - mutex_lock(&dev->struct_mutex); - i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, - args->gtt_end); - dev_priv->gtt.mappable_end = args->gtt_end; - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { @@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } -static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) +static int +i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { - drm_dma_handle_t *phys = obj->phys_handle; + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + char *vaddr = obj->phys_handle->vaddr; + struct sg_table *st; + struct scatterlist *sg; + int i; - if (!phys) - return; + if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) + return -EINVAL; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) + return PTR_ERR(page); + + src = kmap_atomic(page); + memcpy(vaddr, src, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); + kunmap_atomic(src); + + page_cache_release(page); + vaddr += PAGE_SIZE; + } + + i915_gem_chipset_flush(obj->base.dev); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return -ENOMEM; + + if (sg_alloc_table(st, 1, GFP_KERNEL)) { + kfree(st); + return -ENOMEM; + } + + sg = st->sgl; + sg->offset = 0; + sg->length = obj->base.size; - if (obj->madv == I915_MADV_WILLNEED) { + sg_dma_address(sg) = obj->phys_handle->busaddr; + sg_dma_len(sg) = obj->base.size; + + obj->pages = st; + obj->has_dma_mapping = true; + return 0; +} + +static void +i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) +{ + int ret; + + BUG_ON(obj->madv == __I915_MADV_PURGED); + + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. 
+ */ + WARN_ON(ret != -EIO); + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; + + if (obj->dirty) { struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - char *vaddr = phys->vaddr; + char *vaddr = obj->phys_handle->vaddr; int i; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { - struct page *page = shmem_read_mapping_page(mapping, i); - if (!IS_ERR(page)) { - char *dst = kmap_atomic(page); - memcpy(dst, vaddr, PAGE_SIZE); - drm_clflush_virt_range(dst, PAGE_SIZE); - kunmap_atomic(dst); - - set_page_dirty(page); + struct page *page; + char *dst; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) + continue; + + dst = kmap_atomic(page); + drm_clflush_virt_range(vaddr, PAGE_SIZE); + memcpy(dst, vaddr, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + if (obj->madv == I915_MADV_WILLNEED) mark_page_accessed(page); - page_cache_release(page); - } + page_cache_release(page); vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(obj->base.dev); + obj->dirty = 0; } -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); -#endif - drm_pci_free(obj->base.dev, phys); - obj->phys_handle = NULL; + sg_free_table(obj->pages); + kfree(obj->pages); + + obj->has_dma_mapping = false; +} + +static void +i915_gem_object_release_phys(struct drm_i915_gem_object *obj) +{ + drm_pci_free(obj->base.dev, obj->phys_handle); +} + +static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { + .get_pages = i915_gem_object_get_pages_phys, + .put_pages = i915_gem_object_put_pages_phys, + .release = i915_gem_object_release_phys, +}; + +static int +drop_pages(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma, *next; + int ret; + + drm_gem_object_reference(&obj->base); + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) + if (i915_vma_unbind(vma)) + break; + + ret = i915_gem_object_put_pages(obj); + drm_gem_object_unreference(&obj->base); + + return ret; } int @@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) { drm_dma_handle_t *phys; - struct address_space *mapping; - char *vaddr; - int i; + int ret; if (obj->phys_handle) { if ((unsigned long)obj->phys_handle->vaddr & (align -1)) @@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, if (obj->base.filp == NULL) return -EINVAL; + ret = drop_pages(obj); + if (ret) + return ret; + /* create a new object */ phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); if (!phys) return -ENOMEM; - vaddr = phys->vaddr; -#ifdef CONFIG_X86 - set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); -#endif - mapping = file_inode(obj->base.filp)->i_mapping; - for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { - struct page *page; - char *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); -#endif - drm_pci_free(obj->base.dev, phys); - return PTR_ERR(page); - } - - src = kmap_atomic(page); - memcpy(vaddr, src, PAGE_SIZE); - kunmap_atomic(src); - - mark_page_accessed(page); - page_cache_release(page); - - vaddr += PAGE_SIZE; - } - obj->phys_handle = phys; - return 0; + obj->ops = &i915_gem_phys_ops; + + return i915_gem_object_get_pages(obj); } static int @@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; 
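As a side note on the pattern the phys-object conversion above relies on: i915_gem_object_get_pages_phys() publishes the physically contiguous DMA buffer as a single-entry scatterlist, so the rest of GEM can treat a phys object like any other page-backed object. A minimal standalone sketch of that idiom follows; wrap_contig_buffer() and its busaddr/size parameters are illustrative names, not fields from the driver.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Sketch only: wrap an already-mapped contiguous buffer in an sg_table. */
static struct sg_table *wrap_contig_buffer(dma_addr_t busaddr, size_t size)
{
	struct sg_table *st;
	struct scatterlist *sg;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {	/* one entry suffices */
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;
	sg_dma_address(sg) = busaddr;	/* already a bus address, no dma_map needed */
	sg_dma_len(sg) = size;

	return st;
}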
char __user *user_data = to_user_ptr(args->data_ptr); + int ret; + + /* We manually control the domain here and pretend that it + * remains coherent i.e. in the GTT domain, like shmem_pwrite. + */ + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return -EFAULT; } + drm_clflush_virt_range(vaddr, args->size); i915_gem_chipset_flush(dev); return 0; } @@ -346,7 +401,6 @@ static int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size, - bool dumb, uint32_t *handle_p) { struct drm_i915_gem_object *obj; @@ -362,7 +416,6 @@ i915_gem_create(struct drm_file *file, if (obj == NULL) return -ENOMEM; - obj->base.dumb = dumb; ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(&obj->base); @@ -382,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; return i915_gem_create(file, dev, - args->size, true, &args->handle); + args->size, &args->handle); } /** @@ -395,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_create *args = data; return i915_gem_create(file, dev, - args->size, false, &args->handle); + args->size, &args->handle); } static inline int @@ -995,6 +1048,7 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_object *obj; int ret; @@ -1014,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -EFAULT; } + intel_runtime_pm_get(dev_priv); + ret = i915_mutex_lock_interruptible(dev); if (ret) - return ret; + goto put_rpm; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { @@ -1048,11 +1104,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->phys_handle) { - ret = i915_gem_phys_pwrite(obj, args, file); - goto out; - } - if (obj->tiling_mode == I915_TILING_NONE && obj->base.write_domain != I915_GEM_DOMAIN_CPU && cpu_write_needs_clflush(obj)) { @@ -1062,13 +1113,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * textures). Fallback to the shmem path in that case. */ } - if (ret == -EFAULT || ret == -ENOSPC) - ret = i915_gem_shmem_pwrite(dev, obj, args, file); + if (ret == -EFAULT || ret == -ENOSPC) { + if (obj->phys_handle) + ret = i915_gem_phys_pwrite(obj, args, file); + else + ret = i915_gem_shmem_pwrite(dev, obj, args, file); + } out: drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); +put_rpm: + intel_runtime_pm_put(dev_priv); + return ret; } @@ -1173,7 +1231,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; - timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0; + timeout_expire = timeout ? 
+ jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { gen6_rps_boost(dev_priv); @@ -1249,6 +1308,16 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, s64 tres = *timeout - (now - before); *timeout = tres < 0 ? 0 : tres; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regression from the timespec->ktime conversion. + */ + if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000) + *timeout = 0; } return ret; @@ -1775,10 +1844,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) drm_gem_free_mmap_offset(&obj->base); } -static int +int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, bool dumb, + uint32_t handle, uint64_t *offset) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1795,13 +1864,6 @@ i915_gem_mmap_gtt(struct drm_file *file, goto unlock; } - /* - * We don't allow dumb mmaps on objects created using another - * interface. - */ - WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach), - "Illegal dumb map of accelerated buffer.\n"); - if (obj->base.size > dev_priv->gtt.mappable_end) { ret = -E2BIG; goto out; @@ -1826,15 +1888,6 @@ unlock: return ret; } -int -i915_gem_dumb_map_offset(struct drm_file *file, - struct drm_device *dev, - uint32_t handle, - uint64_t *offset) -{ - return i915_gem_mmap_gtt(file, dev, handle, true, offset); -} - /** * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device @@ -1856,7 +1909,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_mmap_gtt *args = data; - return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset); + return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } static inline int @@ -2140,6 +2193,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj); + if (obj->tiling_mode != I915_TILING_NONE && + dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + i915_gem_object_pin_pages(obj); + return 0; err_pages: @@ -2438,15 +2495,13 @@ int __i915_add_request(struct intel_engine_cs *ring, ring->outstanding_lazy_seqno = 0; ring->preallocated_lazy_request = NULL; - if (!dev_priv->ums.mm_suspended) { - i915_queue_hangcheck(ring->dev); + i915_queue_hangcheck(ring->dev); - cancel_delayed_work_sync(&dev_priv->mm.idle_work); - queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv->dev); - } + cancel_delayed_work_sync(&dev_priv->mm.idle_work); + queue_delayed_work(dev_priv->wq, + &dev_priv->mm.retire_work, + round_jiffies_up_relative(HZ)); + intel_mark_busy(dev_priv->dev); if (out_seqno) *out_seqno = request->seqno; @@ -2513,12 +2568,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv, static void i915_gem_free_request(struct drm_i915_gem_request *request) { + struct intel_context *ctx = request->ctx; + list_del(&request->list); i915_gem_request_remove_from_client(request); - if (request->ctx) - i915_gem_context_unreference(request->ctx); + if (ctx) { + if (i915.enable_execlists) { + struct intel_engine_cs *ring = request->ring; + if (ctx != ring->default_context) + intel_lr_context_unpin(ring, ctx); + } + i915_gem_context_unreference(ctx); + }
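The timeout handling in __i915_wait_seqno() above leans on the new nsecs_to_jiffies_timeout() helper added to i915_drv.h earlier in this patch: the +1 jiffy it adds guarantees the caller sleeps at least as long as requested even when the ns-to-jiffies conversion rounds down, and the post-wait fixup treats any sub-jiffy residue left after -ETIME as fully expired. A short sketch of both halves, assuming the i915_drv.h helper is in scope; the example_* names are hypothetical.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch: compute the deadline the way the wait loop above does. */
static unsigned long example_deadline(const s64 *timeout_ns)
{
	/* 0 doubles as "no timeout" for the surrounding wait loop */
	return timeout_ns ? jiffies + nsecs_to_jiffies_timeout((u64)*timeout_ns) : 0;
}

/* Sketch: squash jiffies<->nsecs rounding noise after a timed-out wait. */
static int example_fixup_timeout(int ret, s64 *timeout_ns)
{
	if (ret == -ETIME && *timeout_ns < jiffies_to_usecs(1) * 1000)
		*timeout_ns = 0;
	return ret;
}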
kfree(request); } @@ -2573,6 +2636,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, } /* + * Clear the execlists queue up before freeing the requests, as those + * are the ones that keep the context and ringbuffer backing objects + * pinned in place. + */ + while (!list_empty(&ring->execlist_queue)) { + struct intel_ctx_submit_request *submit_req; + + submit_req = list_first_entry(&ring->execlist_queue, + struct intel_ctx_submit_request, + execlist_link); + list_del(&submit_req->execlist_link); + intel_runtime_pm_put(dev_priv); + i915_gem_context_unreference(submit_req->ctx); + kfree(submit_req); + } + + /* * We must free the requests after all the corresponding objects have * been moved off active lists. Which is the same order as the normal * retire_requests function does. This is important if object hold @@ -2589,18 +2669,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, i915_gem_free_request(request); } - while (!list_empty(&ring->execlist_queue)) { - struct intel_ctx_submit_request *submit_req; - - submit_req = list_first_entry(&ring->execlist_queue, - struct intel_ctx_submit_request, - execlist_link); - list_del(&submit_req->execlist_link); - intel_runtime_pm_put(dev_priv); - i915_gem_context_unreference(submit_req->ctx); - kfree(submit_req); - } - /* These may not have been flush before the reset, do so now */ kfree(ring->preallocated_lazy_request); ring->preallocated_lazy_request = NULL; @@ -2737,6 +2805,15 @@ i915_gem_retire_requests(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { i915_gem_retire_requests_ring(ring); idle &= list_empty(&ring->request_list); + if (i915.enable_execlists) { + unsigned long flags; + + spin_lock_irqsave(&ring->execlist_lock, flags); + idle &= list_empty(&ring->execlist_queue); + spin_unlock_irqrestore(&ring->execlist_lock, flags); + + intel_execlists_retire_requests(ring); + } } if (idle) @@ -3527,7 +3604,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. 
*/ - if (obj->stolen) + if (obj->stolen || obj->phys_handle) return false; /* If the GPU is snooping the contents of the CPU cache, @@ -4187,7 +4264,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int ret; - if (INTEL_INFO(dev)->gen >= 6) + if (drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); @@ -4243,6 +4320,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -4320,6 +4400,7 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_madvise *args = data; struct drm_i915_gem_object *obj; int ret; @@ -4347,6 +4428,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, goto out; } + if (obj->pages && + obj->tiling_mode != I915_TILING_NONE && + dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + if (obj->madv == I915_MADV_WILLNEED) + i915_gem_object_unpin_pages(obj); + if (args->madv == I915_MADV_WILLNEED) + i915_gem_object_pin_pages(obj); + } + if (obj->madv != __I915_MADV_PURGED) obj->madv = args->madv; @@ -4489,8 +4579,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) } } - i915_gem_object_detach_phys(obj); - /* Stolen objects don't hold a ref, but do hold pin count. Fix that up * before progressing. */ if (obj->stolen) @@ -4498,6 +4586,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) WARN_ON(obj->frontbuffer_bits); + if (obj->pages && obj->madv == I915_MADV_WILLNEED && + dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES && + obj->tiling_mode != I915_TILING_NONE) + i915_gem_object_unpin_pages(obj); + if (WARN_ON(obj->pages_pin_count)) obj->pages_pin_count = 0; if (discard_backing_storage(obj)) @@ -4570,9 +4663,6 @@ i915_gem_suspend(struct drm_device *dev) int ret = 0; mutex_lock(&dev->struct_mutex); - if (dev_priv->ums.mm_suspended) - goto err; - ret = i915_gpu_idle(dev); if (ret) goto err; @@ -4583,15 +4673,7 @@ i915_gem_suspend(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_gem_evict_everything(dev); - i915_kernel_lost_context(dev); i915_gem_stop_ringbuffers(dev); - - /* Hack! Don't let anybody do execbuf while we don't control the chip. - * We need to replace this with a semaphore, or something. - * And not confound ums.mm_suspended! - */ - dev_priv->ums.mm_suspended = !drm_core_check_feature(dev, - DRIVER_MODESET); mutex_unlock(&dev->struct_mutex); del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); @@ -4882,9 +4964,6 @@ int i915_gem_init(struct drm_device *dev) } mutex_unlock(&dev->struct_mutex); - /* Allow hardware batchbuffers unless told otherwise, but not for KMS. 
*/ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - dev_priv->dri1.allow_batchbuffer = 1; return ret; } @@ -4899,74 +4978,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) dev_priv->gt.cleanup_ring(ring); } -int -i915_gem_entervt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return 0; - - if (i915_reset_in_progress(&dev_priv->gpu_error)) { - DRM_ERROR("Reenabling wedged hardware, good luck\n"); - atomic_set(&dev_priv->gpu_error.reset_counter, 0); - } - - mutex_lock(&dev->struct_mutex); - dev_priv->ums.mm_suspended = 0; - - ret = i915_gem_init_hw(dev); - if (ret != 0) { - mutex_unlock(&dev->struct_mutex); - return ret; - } - - BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); - - ret = drm_irq_install(dev, dev->pdev->irq); - if (ret) - goto cleanup_ringbuffer; - mutex_unlock(&dev->struct_mutex); - - return 0; - -cleanup_ringbuffer: - i915_gem_cleanup_ringbuffer(dev); - dev_priv->ums.mm_suspended = 1; - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -int -i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return 0; - - mutex_lock(&dev->struct_mutex); - drm_irq_uninstall(dev); - mutex_unlock(&dev->struct_mutex); - - return i915_gem_suspend(dev); -} - -void -i915_gem_lastclose(struct drm_device *dev) -{ - int ret; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - ret = i915_gem_suspend(dev); - if (ret) - DRM_ERROR("failed to idle hardware: %d\n", ret); -} - static void init_ring_lists(struct intel_engine_cs *ring) { diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 7d3257111737..d011ec82ef1e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -88,6 +88,7 @@ #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_trace.h" /* This is a HW constraint. The value below is the largest known requirement * I've seen in a spec to date, and that was a workaround for a non-shipping @@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref) struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + trace_i915_context_free(ctx); + if (i915.enable_execlists) intel_lr_context_free(ctx); @@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev, ctx->ppgtt = ppgtt; } + trace_i915_context_create(ctx); + return ctx; err_unpin: @@ -468,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring, u32 hw_flags) { u32 flags = hw_flags | MI_MM_SPACE_GTT; - int ret; + const int num_rings = + /* Use an extended w/a on ivb+ if signalling from other rings */ + i915_semaphore_is_enabled(ring->dev) ? + hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 : + 0; + int len, i, ret; /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value @@ -485,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring, if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); - ret = intel_ring_begin(ring, 6); + + len = 4; + if (INTEL_INFO(ring->dev)->gen >= 7) + len += 2 + (num_rings ? 
4*num_rings + 2 : 0); + + ret = intel_ring_begin(ring, len); if (ret) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(ring->dev)->gen >= 7) + if (INTEL_INFO(ring->dev)->gen >= 7) { intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); - else - intel_ring_emit(ring, MI_NOOP); + if (num_rings) { + struct intel_engine_cs *signaller; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); + for_each_ring(signaller, to_i915(ring->dev), i) { + if (signaller == ring) + continue; + + intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + } + } + } intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); @@ -505,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring, */ intel_ring_emit(ring, MI_NOOP); - if (INTEL_INFO(ring->dev)->gen >= 7) + if (INTEL_INFO(ring->dev)->gen >= 7) { + if (num_rings) { + struct intel_engine_cs *signaller; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); + for_each_ring(signaller, to_i915(ring->dev), i) { + if (signaller == ring) + continue; + + intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + } + } intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); - else - intel_ring_emit(ring, MI_NOOP); + } intel_ring_advance(ring); @@ -549,6 +586,7 @@ static int do_switch(struct intel_engine_cs *ring, from = ring->last_context; if (to->ppgtt) { + trace_switch_mm(ring, to); ret = to->ppgtt->switch_mm(to->ppgtt, ring); if (ret) goto unpin_out; @@ -629,7 +667,7 @@ done: if (uninitialized) { if (ring->init_context) { - ret = ring->init_context(ring); + ret = ring->init_context(ring, to); if (ret) DRM_ERROR("ring init context: %d\n", ret); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2b02fcfae534..11738316394a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb, goto err; } - WARN_ONCE(obj->base.dumb, - "GPU use of dumb buffer is illegal.\n"); - drm_gem_object_reference(&obj->base); list_add_tail(&obj->obj_exec_link, &objects); } @@ -1023,6 +1020,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } +static int +i915_emit_box(struct intel_engine_cs *ring, + struct drm_clip_rect *box, + int DR1, int DR4) +{ + int ret; + + if (box->y2 <= box->y1 || box->x2 <= box->x1 || + box->y2 <= 0 || box->x2 <= 0) { + DRM_ERROR("Bad box %d,%d..%d,%d\n", + box->x1, box->y1, box->x2, box->y2); + return -EINVAL; + } + + if (INTEL_INFO(ring->dev)->gen >= 4) { + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965); + intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); + intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); + intel_ring_emit(ring, DR4); + } else { + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO); + intel_ring_emit(ring, DR1); + intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); + intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); + intel_ring_emit(ring, DR4); + intel_ring_emit(ring, 0); + } + intel_ring_advance(ring); + + return 0; +} + + int i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *ring, @@ -1151,7 +1189,7 @@ 
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { - ret = i915_emit_box(dev, &cliprects[i], + ret = i915_emit_box(ring, &cliprects[i], args->DR1, args->DR4); if (ret) goto error; @@ -1300,12 +1338,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto pre_mutex_err; - if (dev_priv->ums.mm_suspended) { - mutex_unlock(&dev->struct_mutex); - ret = -EBUSY; - goto pre_mutex_err; - } - ctx = i915_gem_validate_context(dev, file, ring, ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index de12017c809b..171f6eafdeee 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -43,7 +43,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) if (IS_GEN8(dev)) has_full_ppgtt = false; /* XXX why? */ - if (enable_ppgtt == 0 || !has_aliasing_ppgtt) + /* + * We don't allow disabling PPGTT for gen9+ as it's a requirement for + * execlists, the sole mechanism available to submit work. + */ + if (INTEL_INFO(dev)->gen < 9 && + (enable_ppgtt == 0 || !has_aliasing_ppgtt)) return 0; if (enable_ppgtt == 1) @@ -164,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); - /* Mark the page as writeable. Other platforms don't have a - * setting for read-only/writable, so this matches that behavior. - */ if (!(flags & PTE_READ_ONLY)) pte |= BYT_PTE_WRITEABLE; @@ -1174,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) ppgtt->file_priv = fpriv; + trace_i915_ppgtt_create(&ppgtt->base); + return ppgtt; } @@ -1182,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref) struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref); + trace_i915_ppgtt_release(&ppgtt->base); + /* vmas should already be unbound */ WARN_ON(!list_empty(&ppgtt->base.active_list)); WARN_ON(!list_empty(&ppgtt->base.inactive_list)); @@ -1658,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, } } -int i915_gem_setup_global_gtt(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end) +static int i915_gem_setup_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) { /* Let GEM Manage all of the aperture. * @@ -1920,6 +1926,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + if (!USES_PPGTT(dev_priv->dev)) + /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, + * so RTL will always use the value corresponding to + * pat_sel = 000". + * So let's disable cache for GGTT to avoid screen corruptions. + * MOCS still can be used though. + * - System agent ggtt writes (i.e. cpu gtt mmaps) already work + * before this patch, i.e. the same uncached + snooping access + * like on gen6/7 seems to be in effect. + * - So this just fixes blitter/render access. Again it looks + * like it's not just uncached access, but uncached + snooping. + * So we can still hold onto all our assumptions wrt cpu + * clflushing on LLC machines. + */ + pat = GEN8_PPAT(0, GEN8_PPAT_UC); + /* XXX: spec defines this as 2 distinct registers. 
It's unclear if a 64b * write would work. */ I915_WRITE(GEN8_PRIVATE_PAT, pat); @@ -1936,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) * Only the snoop bit has meaning for CHV, the rest is * ignored. * - * Note that the harware enforces snooping for all page - * table accesses. The snoop bit is actually ignored for - * PDEs. + * The hardware will never snoop for certain types of accesses: + * - CPU GTT (GMADR->GGTT->no snoop->memory) + * - PPGTT page tables + * - some other special cycles + * + * As with BDW, we also need to consider the following for GT accesses: + * "For GGTT, there is NO pat_sel[2:0] from the entry, + * so RTL will always use the value corresponding to + * pat_sel = 000". + * Which means we must set the snoop bit in PAT entry 0 + * in order to keep the global status page working. */ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | GEN8_PPAT(1, 0) | diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index d0562d0ef6ec..beaf4bcfdac8 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -274,8 +274,6 @@ struct i915_hw_ppgtt { int i915_gem_gtt_init(struct drm_device *dev); void i915_gem_init_global_gtt(struct drm_device *dev); -int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, - unsigned long mappable_end, unsigned long end); void i915_global_gtt_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index c38891892547..a2045848bd1a 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) r = devm_request_mem_region(dev->dev, base + 1, dev_priv->gtt.stolen_size - 1, "Graphics Stolen Memory"); - if (r == NULL) { + /* + * GEN3 firmware likes to smash pci bridges into the stolen + * range. Apparently this works. + */ + if (r == NULL && !IS_GEN3(dev)) { DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", base, base + (uint32_t)dev_priv->gtt.stolen_size); base = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 749ab485569e..4727a4e2c87c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -178,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } break; } + + /* check for L-shaped memory aka modified enhanced addressing */ + if (IS_GEN4(dev)) { + uint32_t ddc2 = I915_READ(DCC2); + + if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) + dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + } + if (dcc == 0xffffffff) { DRM_ERROR("Couldn't read from MCHBAR. " "Disabling tiling.\n"); @@ -375,24 +384,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * has to also include the unfenced register the GPU uses * whilst executing a fenced command for an untiled object. 
*/ - - obj->map_and_fenceable = - !i915_gem_obj_ggtt_bound(obj) || - (i915_gem_obj_ggtt_offset(obj) + - obj->base.size <= dev_priv->gtt.mappable_end && - i915_gem_object_fence_ok(obj, args->tiling_mode)); - - /* Rebind if we need a change of alignment */ - if (!obj->map_and_fenceable) { - u32 unfenced_align = - i915_gem_get_gtt_alignment(dev, obj->base.size, - args->tiling_mode, - false); - if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) - ret = i915_gem_object_ggtt_unbind(obj); - } + if (obj->map_and_fenceable && + !i915_gem_object_fence_ok(obj, args->tiling_mode)) + ret = i915_gem_object_ggtt_unbind(obj); if (ret == 0) { + if (obj->pages && + obj->madv == I915_MADV_WILLNEED && + dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + if (args->tiling_mode == I915_TILING_NONE) + i915_gem_object_unpin_pages(obj); + if (obj->tiling_mode == I915_TILING_NONE) + i915_gem_object_pin_pages(obj); + } + obj->fence_dirty = obj->last_fenced_seqno || obj->fence_reg != I915_FENCE_REG_NONE; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 89a2f3dbf956..cdaee6ce05f8 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) static void i915_ring_error_state(struct drm_i915_error_state_buf *m, struct drm_device *dev, - struct drm_i915_error_ring *ring) + struct drm_i915_error_state *error, + int ring_idx) { + struct drm_i915_error_ring *ring = &error->ring[ring_idx]; + if (!ring->valid) return; + err_printf(m, "%s command stream:\n", ring_str(ring_idx)); err_printf(m, " HEAD: 0x%08x\n", ring->head); err_printf(m, " TAIL: 0x%08x\n", ring->tail); err_printf(m, " CTL: 0x%08x\n", ring->ctl); @@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (INTEL_INFO(dev)->gen == 7) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - err_printf(m, "%s command stream:\n", ring_str(i)); - i915_ring_error_state(m, dev, &error->ring[i]); - } + for (i = 0; i < ARRAY_SIZE(error->ring); i++) + i915_ring_error_state(m, dev, error, i); for (i = 0; i < error->vm_count; i++) { err_printf(m, "vm[%d]\n", i); @@ -807,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, if (!error->semaphore_obj) error->semaphore_obj = - i915_error_object_create(dev_priv, - dev_priv->semaphore_obj, - &dev_priv->gtt.base); + i915_error_ggtt_object_create(dev_priv, + dev_priv->semaphore_obj); for_each_ring(to, dev_priv, i) { int idx; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5fff2870a17b..d0d3dfbe6d2a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -138,6 +138,8 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ POSTING_READ(type##IMR); \ } while (0) +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); + /* For display hotplug interrupt */ void ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) @@ -200,6 +202,21 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) ilk_update_gt_irq(dev_priv, mask, 0); } +static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; +} + +static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IMR(2) : GEN6_PMIMR; +} + +static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; +} + /** * snb_update_pm_irq - update GEN6_PMIMR * @dev_priv: driver private @@ -214,68 +231,91 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return; - new_val = dev_priv->pm_irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->pm_irq_mask) { dev_priv->pm_irq_mask = new_val; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); - POSTING_READ(GEN6_PMIMR); + I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); + POSTING_READ(gen6_pm_imr(dev_priv)); } } void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + snb_update_pm_irq(dev_priv, mask, mask); } -void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, + uint32_t mask) { snb_update_pm_irq(dev_priv, mask, 0); } -/** - * bdw_update_pm_irq - update GT interrupt 2 - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - * - * Copied from the snb function, updated with relevant register offsets - */ -static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { - uint32_t new_val; - - assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; - new_val = dev_priv->pm_irq_mask; - new_val &= ~interrupt_mask; - new_val |= (~enabled_irq_mask & interrupt_mask); + __gen6_disable_pm_irq(dev_priv, mask); +} - if (new_val != dev_priv->pm_irq_mask) { - dev_priv->pm_irq_mask = new_val; - I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); - POSTING_READ(GEN8_GT_IMR(2)); - } +void gen6_reset_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t reg = gen6_pm_iir(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + I915_WRITE(reg, dev_priv->pm_rps_events); + I915_WRITE(reg, dev_priv->pm_rps_events); + POSTING_READ(reg); + spin_unlock_irq(&dev_priv->irq_lock); } -void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_enable_rps_interrupts(struct drm_device *dev) { - bdw_update_pm_irq(dev_priv, mask, mask); + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + + WARN_ON(dev_priv->rps.pm_iir); + WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); + dev_priv->rps.interrupts_enabled = true; + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | + dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + + spin_unlock_irq(&dev_priv->irq_lock); } -void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_disable_rps_interrupts(struct drm_device *dev) { - bdw_update_pm_irq(dev_priv, mask, 0); + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->rps.interrupts_enabled = false; + spin_unlock_irq(&dev_priv->irq_lock); + + cancel_work_sync(&dev_priv->rps.work); + + spin_lock_irq(&dev_priv->irq_lock); + + I915_WRITE(GEN6_PMINTRMSK, 
INTEL_INFO(dev_priv)->gen >= 8 ? + ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); + + __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & + ~dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); + + dev_priv->rps.pm_iir = 0; + + spin_unlock_irq(&dev_priv->irq_lock); } /** @@ -980,7 +1020,6 @@ static void notify_ring(struct drm_device *dev, trace_i915_gem_request_complete(ring); wake_up_all(&ring->irq_queue); - i915_queue_hangcheck(dev); } static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, @@ -1116,14 +1155,15 @@ static void gen6_pm_rps_work(struct work_struct *work) int new_delay, adj; spin_lock_irq(&dev_priv->irq_lock); + /* Speed up work cancelation during disabling rps interrupts. */ + if (!dev_priv->rps.interrupts_enabled) { + spin_unlock_irq(&dev_priv->irq_lock); + return; + } pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - if (INTEL_INFO(dev_priv->dev)->gen >= 8) - gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - else { - /* Make sure not to corrupt PMIMR state used by ringbuffer */ - gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - } + /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ @@ -1316,28 +1356,13 @@ static void snb_gt_irq_handler(struct drm_device *dev, if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | - GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { - i915_handle_error(dev, false, "GT error interrupt 0x%08x", - gt_iir); - } + GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) + DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); if (gt_iir & GT_PARITY_ERROR(dev)) ivybridge_parity_error_irq_handler(dev, gt_iir); } -static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - if ((pm_iir & dev_priv->pm_rps_events) == 0) - return; - - spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); - spin_unlock(&dev_priv->irq_lock); - - queue_work(dev_priv->wq, &dev_priv->rps.work); -} - static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 master_ctl) @@ -1399,7 +1424,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, I915_WRITE(GEN8_GT_IIR(2), tmp & dev_priv->pm_rps_events); ret = IRQ_HANDLED; - gen8_rps_irq_handler(dev_priv, tmp); + gen6_rps_irq_handler(dev_priv, tmp); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1613,7 +1638,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, if (!pipe_crc->entries) { spin_unlock(&pipe_crc->lock); - DRM_ERROR("spurious interrupt\n"); + DRM_DEBUG_KMS("spurious interrupt\n"); return; } @@ -1699,24 +1724,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) * the work queue. */ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { + /* TODO: RPS on GEN9+ is not supported yet. 
*/ + if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, + "GEN9+: unexpected RPS IRQ\n")) + return; + if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + if (dev_priv->rps.interrupts_enabled) { + dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; + queue_work(dev_priv->wq, &dev_priv->rps.work); + } spin_unlock(&dev_priv->irq_lock); - - queue_work(dev_priv->wq, &dev_priv->rps.work); } + if (INTEL_INFO(dev_priv)->gen >= 8) + return; + if (HAS_VEBOX(dev_priv->dev)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { - i915_handle_error(dev_priv->dev, false, - "VEBOX CS error interrupt 0x%08x", - pm_iir); - } + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } } @@ -2222,6 +2253,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) irqreturn_t ret = IRQ_NONE; uint32_t tmp = 0; enum pipe pipe; + u32 aux_mask = GEN8_AUX_CHANNEL_A; + + if (IS_GEN9(dev)) + aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; master_ctl = I915_READ(GEN8_MASTER_IRQ); master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; @@ -2254,7 +2290,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (tmp) { I915_WRITE(GEN8_DE_PORT_IIR, tmp); ret = IRQ_HANDLED; - if (tmp & GEN8_AUX_CHANNEL_A) + + if (tmp & aux_mask) dp_aux_irq_handler(dev); else DRM_ERROR("Unexpected DE Port interrupt\n"); @@ -2403,6 +2440,9 @@ static void i915_error_work_func(struct work_struct *work) * simulated reset via debugs, so get an RPM reference. */ intel_runtime_pm_get(dev_priv); + + intel_prepare_reset(dev); + /* * All state reset _must_ be completed before we update the * reset counter, for otherwise waiters might miss the reset @@ -2411,7 +2451,7 @@ static void i915_error_work_func(struct work_struct *work) */ ret = i915_reset(dev); - intel_display_handle_reset(dev); + intel_finish_reset(dev); intel_runtime_pm_put(dev_priv); @@ -3036,10 +3076,15 @@ static void i915_hangcheck_elapsed(unsigned long data) void i915_queue_hangcheck(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; + if (!i915.enable_hangcheck) return; - mod_timer(&dev_priv->gpu_error.hangcheck_timer, + /* Don't continually defer the hangcheck, but make sure it is active */ + if (timer_pending(timer)) + return; + mod_timer(timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); } @@ -3266,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); if (INTEL_INFO(dev)->gen >= 6) { - pm_irqs |= dev_priv->pm_rps_events; - + /* + * RPS interrupts will get enabled/disabled on demand when RPS + * itself is enabled/disabled. + */ if (HAS_VEBOX(dev)) pm_irqs |= PM_VEBOX_USER_INTERRUPT; @@ -3479,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->pm_irq_mask = 0xffffffff; GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); - GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); + /* + * RPS interrupts will get enabled/disabled on demand when RPS itself + * is enabled/disabled. 
+ */ + GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); } @@ -3488,11 +3539,14 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; uint32_t de_pipe_enables; int pipe; + u32 aux_en = GEN8_AUX_CHANNEL_A; - if (IS_GEN9(dev_priv)) + if (IS_GEN9(dev_priv)) { de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else + aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + } else de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; @@ -3510,7 +3564,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->de_irq_mask[pipe], de_pipe_enables); - GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); + GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); } static int gen8_irq_postinstall(struct drm_device *dev) @@ -3533,34 +3587,8 @@ static int gen8_irq_postinstall(struct drm_device *dev) static int cherryview_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | - PIPE_CRC_DONE_INTERRUPT_STATUS; - int pipe; - /* - * Leave vblank interrupts masked initially. enable/disable will - * toggle them based on usage. - */ - dev_priv->irq_mask = ~enable_mask; - - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - - spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_enable); - spin_unlock_irq(&dev_priv->irq_lock); - - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IER, enable_mask); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - POSTING_READ(VLV_IMR); + vlv_display_irq_postinstall(dev_priv); gen8_gt_irq_postinstall(dev_priv); @@ -3580,6 +3608,20 @@ static void gen8_irq_uninstall(struct drm_device *dev) gen8_irq_reset(dev); } +static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) +{ + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_uninstall(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + vlv_display_irq_reset(dev_priv); + + dev_priv->irq_mask = ~0; +} + static void valleyview_irq_uninstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3593,22 +3635,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(HWSTAM, 0xffffffff); - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. 
*/ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - valleyview_display_irqs_uninstall(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - vlv_display_irq_reset(dev_priv); - - dev_priv->irq_mask = 0; + vlv_display_irq_uninstall(dev_priv); } static void cherryview_irq_uninstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; if (!dev_priv) return; @@ -3620,13 +3652,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev) GEN5_IRQ_RESET(GEN8_PCU_); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - - GEN5_IRQ_RESET(VLV_); + vlv_display_irq_uninstall(dev_priv); } static void ironlake_irq_uninstall(struct drm_device *dev) @@ -3699,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3710,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3741,9 +3766,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) */ spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); @@ -3760,8 +3783,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_WRITE16(IIR, iir & ~flip_mask); new_iir = I915_READ16(IIR); /* Flush posted writes */ - i915_update_dri1_breadcrumb(dev); - if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); @@ -3885,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. 
IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3896,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3926,9 +3946,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) */ spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); @@ -3998,8 +4016,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) iir = new_iir; } while (iir & ~flip_mask); - i915_update_dri1_breadcrumb(dev); - return ret; } @@ -4153,9 +4169,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) */ spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); @@ -4227,8 +4241,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) iir = new_iir; } - i915_update_dri1_breadcrumb(dev); - return ret; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d43fa0e627f8..172de3b3433b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -32,8 +32,19 @@ #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ (pipe) == PIPE_B ? (b) : (c)) -#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) -#define _MASKED_BIT_DISABLE(a) ((a) << 16) +#define _MASKED_FIELD(mask, value) ({ \ + if (__builtin_constant_p(mask)) \ + BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \ + if (__builtin_constant_p(value)) \ + BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \ + if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \ + BUILD_BUG_ON_MSG((value) & ~(mask), \ + "Incorrect value for mask"); \ + (mask) << 16 | (value); }) +#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); }) +#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0)) + + /* PCI config space */ @@ -74,15 +85,17 @@ #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) +#define GCDGMBUS 0xcc #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ /* Graphics reset regs */ -#define I965_GDRST 0xc0 /* PCI config register */ +#define I915_GDRST 0xc0 /* PCI config register */ #define GRDOM_FULL (0<<2) #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) #define GRDOM_MASK (3<<2) +#define GRDOM_RESET_STATUS (1<<1) #define GRDOM_RESET_ENABLE (1<<0) #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ @@ -248,6 +261,16 @@ #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) +/* SKL ones */ +#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8) #define 
MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) @@ -314,6 +337,8 @@ #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) +#define MI_PREDICATE_SRC0 (0x2400) +#define MI_PREDICATE_SRC1 (0x2408) #define MI_PREDICATE_RESULT_2 (0x2214) #define LOWER_SLICE_ENABLED (1<<0) @@ -370,6 +395,7 @@ #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) @@ -564,6 +590,7 @@ enum punit_power_well { #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 #define PUNIT_REG_GPU_FREQ_STS 0xd8 +#define GPLLENABLE (1<<4) #define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_REG_CZ_TIMESTAMP 0xce @@ -1102,6 +1129,7 @@ enum punit_power_well { #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) #define GEN6_NOSYNC 0 +#define RING_PSMI_CTL(base) ((base)+0x50) #define RING_MAX_IDLE(base) ((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) @@ -1268,7 +1296,7 @@ enum punit_power_well { #define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) #define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) -#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) +#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) #define GFX_MODE 0x02520 @@ -1432,6 +1460,7 @@ enum punit_power_well { #define GEN6_BLITTER_FBC_NOTIFY (1<<3) #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 +#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) @@ -2030,6 +2059,8 @@ enum punit_power_well { #define DCC_ADDRESSING_MODE_MASK (3 << 0) #define DCC_CHANNEL_XOR_DISABLE (1 << 10) #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) +#define DCC2 0x10204 +#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) /* Pineview MCH register contains DDR3 setting */ #define CSHRDDR3CTL 0x101a8 @@ -2313,7 +2344,6 @@ enum punit_power_well { #define GEN6_GT_THREAD_STATUS_REG 0x13805c #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 -#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) @@ -4904,6 +4934,18 @@ enum punit_power_well { #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) +#define _PSA_CTL 0x68180 +#define _PSB_CTL 0x68980 +#define PS_ENABLE (1<<31) +#define _PSA_WIN_SZ 0x68174 +#define _PSB_WIN_SZ 0x68974 +#define _PSA_WIN_POS 0x68170 +#define _PSB_WIN_POS 0x68970 + +#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL) +#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ) +#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS) + /* legacy palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 @@ -5048,6 +5090,9 @@ enum punit_power_well { #define GEN8_DE_PORT_IIR 0x44448 #define GEN8_DE_PORT_IER 0x4444c #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) +#define GEN9_AUX_CHANNEL_D (1 << 27) +#define GEN9_AUX_CHANNEL_C (1 << 26) +#define 
GEN9_AUX_CHANNEL_B (1 << 25) #define GEN8_AUX_CHANNEL_A (1 << 0) #define GEN8_DE_MISC_ISR 0x44460 @@ -5131,6 +5176,7 @@ enum punit_power_well { /* GEN8 chicken */ #define HDC_CHICKEN0 0x7300 #define HDC_FORCE_NON_COHERENT (1<<4) +#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) /* WaCatErrorRejectionIssue */ @@ -6010,11 +6056,12 @@ enum punit_power_well { #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define DISPLAY_IPS_CONTROL 0x19 +#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A #define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 +#define GEN6_PCODE_DATA1 0x13812C -#define GEN9_PCODE_DATA1 0x13812C #define GEN9_PCODE_READ_MEM_LATENCY 0x6 #define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 @@ -6427,6 +6474,83 @@ enum punit_power_well { #define LCPLL_CD_SOURCE_FCLK (1<<21) #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) +/* + * SKL Clocks + */ + +/* CDCLK_CTL */ +#define CDCLK_CTL 0x46000 +#define CDCLK_FREQ_SEL_MASK (3<<26) +#define CDCLK_FREQ_450_432 (0<<26) +#define CDCLK_FREQ_540 (1<<26) +#define CDCLK_FREQ_337_308 (2<<26) +#define CDCLK_FREQ_675_617 (3<<26) +#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) + +/* LCPLL_CTL */ +#define LCPLL1_CTL 0x46010 +#define LCPLL2_CTL 0x46014 +#define LCPLL_PLL_ENABLE (1<<31) + +/* DPLL control1 */ +#define DPLL_CTRL1 0x6C058 +#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5)) +#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4)) +#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1)) +#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1) +#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1)) +#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6)) +#define DPLL_CRTL1_LINK_RATE_2700 0 +#define DPLL_CRTL1_LINK_RATE_1350 1 +#define DPLL_CRTL1_LINK_RATE_810 2 +#define DPLL_CRTL1_LINK_RATE_1620 3 +#define DPLL_CRTL1_LINK_RATE_1080 4 +#define DPLL_CRTL1_LINK_RATE_2160 5 + +/* DPLL control2 */ +#define DPLL_CTRL2 0x6C05C +#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15)) +#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) +#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) +#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1)) +#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) + +/* DPLL Status */ +#define DPLL_STATUS 0x6C060 +#define DPLL_LOCK(id) (1<<((id)*8)) + +/* DPLL cfg */ +#define DPLL1_CFGCR1 0x6C040 +#define DPLL2_CFGCR1 0x6C048 +#define DPLL3_CFGCR1 0x6C050 +#define DPLL_CFGCR1_FREQ_ENABLE (1<<31) +#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) +#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9) +#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) + +#define DPLL1_CFGCR2 0x6C044 +#define DPLL2_CFGCR2 0x6C04C +#define DPLL3_CFGCR2 0x6C054 +#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) +#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8) +#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7) +#define DPLL_CFGCR2_KDIV_MASK (3<<5) +#define DPLL_CFGCR2_KDIV(x) (x<<5) +#define DPLL_CFGCR2_KDIV_5 (0<<5) +#define DPLL_CFGCR2_KDIV_2 (1<<5) +#define DPLL_CFGCR2_KDIV_3 (2<<5) +#define DPLL_CFGCR2_KDIV_1 (3<<5) +#define DPLL_CFGCR2_PDIV_MASK (7<<2) +#define DPLL_CFGCR2_PDIV(x) (x<<2) +#define DPLL_CFGCR2_PDIV_1 (0<<2) +#define DPLL_CFGCR2_PDIV_2 (1<<2) +#define DPLL_CFGCR2_PDIV_3 (2<<2) +#define DPLL_CFGCR2_PDIV_7 (4<<2) +#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) + +#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8) +#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - 
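/*
 * Each DPLL id owns six bits of DPLL_CTRL1, as the shift arithmetic
 * above encodes: bit (id*6) is the override enable, bits (id*6+1..3)
 * the link rate, bit (id*6+4) SSC and bit (id*6+5) HDMI mode. Worked
 * out for DPLL1 (id == 1):
 *
 *   DPLL_CTRL1_OVERRIDE(1)                              == 1 << 6
 *   DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 1)  == 1 << 7
 *   DPLL_CTRL1_HDMI_MODE(1)                             == 1 << 11
 */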
SKL_DPLL1) * 8) + /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, * since on HSW we can't write to it using I915_WRITE. */ #define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 043123c77a1f..26368822a33f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev) i915_save_display_reg(dev); /* LVDS state */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); - } else if (IS_VALLEYVIEW(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - - dev_priv->regfile.saveBLC_HIST_CTL = - I915_READ(VLV_BLC_HIST_CTL(PIPE_A)); - dev_priv->regfile.saveBLC_HIST_CTL_B = - I915_READ(VLV_BLC_HIST_CTL(PIPE_B)); - } else { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); - if (IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->regfile.saveLVDS = I915_READ(LVDS); - } - - if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); + else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) + dev_priv->regfile.saveLVDS = I915_READ(LVDS); + /* Panel power sequencer */ if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else { + } else if (!IS_VALLEYVIEW(dev)) { + dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); @@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) mask = ~LVDS_PORT_EN; + /* LVDS state */ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask); else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask); - if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); - + /* Panel power sequencer */ if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - I915_WRITE(RSTDBYCTL, - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); - } else if (IS_VALLEYVIEW(dev)) { - I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A), - dev_priv->regfile.saveBLC_HIST_CTL); - I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B), - dev_priv->regfile.saveBLC_HIST_CTL); - } else { - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); - I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); + } else if 
(!IS_VALLEYVIEW(dev)) { I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); @@ -328,6 +303,10 @@ int i915_save_state(struct drm_device *dev) } } + if (IS_GEN4(dev)) + pci_read_config_word(dev->pdev, GCDGMBUS, + &dev_priv->regfile.saveGCDGMBUS); + /* Cache mode state */ if (INTEL_INFO(dev)->gen < 7) dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -356,6 +335,10 @@ int i915_restore_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); i915_gem_restore_fences(dev); + + if (IS_GEN4(dev)) + pci_write_config_word(dev->pdev, GCDGMBUS, + dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -368,6 +351,8 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); + I915_WRITE(RSTDBYCTL, + dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); } else { I915_WRITE(IER, dev_priv->regfile.saveIER); I915_WRITE(IMR, dev_priv->regfile.saveIMR); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f5aa0067755a..751d4ad14d62 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change, TP_printk("new_freq=%u", __entry->freq) ); +/** + * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints + * + * With full ppgtt enabled each process using drm will allocate at least one + * translation table. With these traces it is possible to keep track of the + * allocation and of the lifetime of the tables; this can be used during + * testing/debug to verify that we are not leaking ppgtts. + * These traces identify the ppgtt through the vm pointer, which is also printed + * by the i915_vma_bind and i915_vma_unbind tracepoints. + */ +DECLARE_EVENT_CLASS(i915_ppgtt, + TP_PROTO(struct i915_address_space *vm), + TP_ARGS(vm), + + TP_STRUCT__entry( + __field(struct i915_address_space *, vm) + __field(u32, dev) + ), + + TP_fast_assign( + __entry->vm = vm; + __entry->dev = vm->dev->primary->index; + ), + + TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm) +) + +DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create, + TP_PROTO(struct i915_address_space *vm), + TP_ARGS(vm) +); + +DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release, + TP_PROTO(struct i915_address_space *vm), + TP_ARGS(vm) +); + +/** + * DOC: i915_context_create and i915_context_free tracepoints + * + * These tracepoints are used to track creation and deletion of contexts. + * If full ppgtt is enabled, they also print the address of the vm assigned to + * the context. + */ +DECLARE_EVENT_CLASS(i915_context, + TP_PROTO(struct intel_context *ctx), + TP_ARGS(ctx), + + TP_STRUCT__entry( + __field(u32, dev) + __field(struct intel_context *, ctx) + __field(struct i915_address_space *, vm) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->vm = ctx->ppgtt ? 
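/*
 * As with any kernel tracepoint, each DEFINE_EVENT() above generates a
 * trace_<name>() helper for the driver to call at the interesting spot,
 * e.g. trace_i915_ppgtt_create(&ppgtt->base) once a ppgtt has been
 * allocated. The events can then be toggled at runtime through tracefs
 * (the path assumes the usual debugfs mount point):
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_create/enable
 */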
&ctx->ppgtt->base : NULL; + __entry->dev = ctx->file_priv->dev_priv->dev->primary->index; + ), + + TP_printk("dev=%u, ctx=%p, ctx_vm=%p", + __entry->dev, __entry->ctx, __entry->vm) +) + +DEFINE_EVENT(i915_context, i915_context_create, + TP_PROTO(struct intel_context *ctx), + TP_ARGS(ctx) +); + +DEFINE_EVENT(i915_context, i915_context_free, + TP_PROTO(struct intel_context *ctx), + TP_ARGS(ctx) +); + +/** + * DOC: switch_mm tracepoint + * + * This tracepoint allows tracking of the mm switch, which is an important point + * in the lifetime of the vm in the legacy submission path. This tracepoint is + * called only if full ppgtt is enabled. + */ +TRACE_EVENT(switch_mm, + TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to), + + TP_ARGS(ring, to), + + TP_STRUCT__entry( + __field(u32, ring) + __field(struct intel_context *, to) + __field(struct i915_address_space *, vm) + __field(u32, dev) + ), + + TP_fast_assign( + __entry->ring = ring->id; + __entry->to = to; + __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; + __entry->dev = ring->dev->primary->index; + ), + + TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", + __entry->dev, __entry->ring, __entry->to, __entry->vm) +); + #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c index 480da593e6c0..d10fe3e9c49f 100644 --- a/drivers/gpu/drm/i915/i915_ums.c +++ b/drivers/gpu/drm/i915/i915_ums.c @@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev) } /* FIXME: regfile.save TV & SDVO state */ + /* Panel fitter */ + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + } + /* Backlight */ if (INTEL_INFO(dev)->gen <= 4) pci_read_config_byte(dev->pdev, PCI_LBPC, @@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev) dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); if (INTEL_INFO(dev)->gen >= 4) dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); } return; @@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 4) I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); + I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); + } + + /* Panel fitter */ + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) { + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); + I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); } /* Display port ratios (must be done before clock is set) */ diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 44c49dfe1096..2c7ed5cb29c0 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -107,7 +107,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, tmp &= ~bits_elda; I915_WRITE(reg_elda, tmp); - for (i = 0; i < eld[2]; i++) + for (i = 0; i < drm_eld_size(eld) / 4; i++) if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) return false; @@ -162,7 +162,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector, len = (tmp >> 9) & 0x1f; /* ELD buffer size */ I915_WRITE(G4X_AUD_CNTL_ST, tmp); - len = min_t(int, eld[2], len); + len = min(drm_eld_size(eld) / 4, len); DRM_DEBUG_DRIVER("ELD size %d\n", len); for (i = 0; i < len; 
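/*
 * Unit check for these ELD loops: drm_eld_size() returns bytes, and
 * since drm_eld_size(eld) == 4 + eld[2] * 4 (header plus baseline
 * block), dividing by four yields eld[2] + 1 dwords. The hardware
 * buffer is written one 32-bit dword at a time, so the old eld[2]
 * bound stopped one dword short of the full ELD.
 */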
i++) I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); @@ -194,6 +194,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) /* Invalidate ELD */ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); tmp &= ~AUDIO_ELD_VALID(pipe); + tmp &= ~AUDIO_OUTPUT_ENABLE(pipe); I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); } @@ -209,7 +210,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, int len, i; DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n", - pipe_name(pipe), eld[2]); + pipe_name(pipe), drm_eld_size(eld)); /* Enable audio presence detect, invalidate ELD */ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); @@ -230,8 +231,8 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp); /* Up to 84 bytes of hw ELD buffer */ - len = min_t(int, eld[2], 21); - for (i = 0; i < len; i++) + len = min(drm_eld_size(eld), 84); + for (i = 0; i < len / 4; i++) I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i)); /* ELD valid */ @@ -320,7 +321,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, int aud_cntrl_st2; DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", - port_name(port), pipe_name(pipe), eld[2]); + port_name(port), pipe_name(pipe), drm_eld_size(eld)); /* * FIXME: We're supposed to wait for vblank here, but we have vblanks @@ -364,8 +365,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, I915_WRITE(aud_cntl_st, tmp); /* Up to 84 bytes of hw ELD buffer */ - len = min_t(int, eld[2], 21); - for (i = 0; i < len; i++) + len = min(drm_eld_size(eld), 84); + for (i = 0; i < len / 4; i++) I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); /* ELD valid */ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 68703cecdefc..e6b45cd150d3 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -670,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } +static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, + uint32_t dpll) +{ + uint32_t cfgcr1_reg, cfgcr2_reg; + uint32_t cfgcr1_val, cfgcr2_val; + uint32_t p0, p1, p2, dco_freq; + + cfgcr1_reg = GET_CFG_CR1_REG(dpll); + cfgcr2_reg = GET_CFG_CR2_REG(dpll); + + cfgcr1_val = I915_READ(cfgcr1_reg); + cfgcr2_val = I915_READ(cfgcr2_reg); + + p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK; + p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK; + + if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR2_PDIV_1: + p0 = 1; + break; + case DPLL_CFGCR2_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR2_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR2_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR2_KDIV_5: + p2 = 5; + break; + case DPLL_CFGCR2_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR2_KDIV_3: + p2 = 3; + break; + case DPLL_CFGCR2_KDIV_1: + p2 = 1; + break; + } + + dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000; + + dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 * + 1000) / 0x8000; + + return dco_freq / (p0 * p1 * p2 * 5); +} + + +static void skl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + int link_clock = 0; + uint32_t dpll_ctl1, dpll; + + dpll = pipe_config->ddi_pll_sel; + + dpll_ctl1 = I915_READ(DPLL_CTRL1); + + if 
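/*
 * skl_calc_wrpll_link() above simply inverts the WRPLL programming
 * model: the DCO frequency is stored in 24 MHz units as an integer
 * plus a 15-bit fraction (steps of 1/0x8000), and the port clock is
 * the DCO divided by p0 * p1 * p2 and by the fixed 5x AFE ratio. A
 * worked example with illustrative register values:
 *
 *   dco_integer == 0x168 (360)  ->  DCO = 360 * 24 MHz = 8640 MHz
 *   p0 * p1 * p2 == 16          ->  AFE clock = 8640 / 16 = 540 MHz
 *   port clock == 540 / 5 = 108 MHz
 */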
(dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) { + link_clock = skl_calc_wrpll_link(dev_priv, dpll); + } else { + link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll); + link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll); + + switch (link_clock) { + case DPLL_CRTL1_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CRTL1_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CRTL1_LINK_RATE_2700: + link_clock = 270000; + break; + default: + WARN(1, "Unsupported link rate\n"); + break; + } + link_clock *= 2; + } + + pipe_config->port_clock = link_clock; + + if (pipe_config->has_dp_encoder) + pipe_config->adjusted_mode.crtc_clock = + intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + else + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; +} + static void hsw_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { @@ -828,6 +933,228 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, return true; } +struct skl_wrpll_params { + uint32_t dco_fraction; + uint32_t dco_integer; + uint32_t qdiv_ratio; + uint32_t qdiv_mode; + uint32_t kdiv; + uint32_t pdiv; + uint32_t central_freq; +}; + +static void +skl_ddi_calculate_wrpll(int clock /* in Hz */, + struct skl_wrpll_params *wrpll_params) +{ + uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ + uint64_t dco_central_freq[3] = {8400000000ULL, + 9000000000ULL, + 9600000000ULL}; + uint32_t min_dco_deviation = 400; + uint32_t min_dco_index = 3; + uint32_t P0[4] = {1, 2, 3, 7}; + uint32_t P2[4] = {1, 2, 3, 5}; + bool found = false; + uint32_t candidate_p = 0; + uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0}; + uint32_t candidate_p2[3] = {0}; + uint32_t dco_central_freq_deviation[3]; + uint32_t i, P1, k, dco_count; + bool retry_with_odd = false; + uint64_t dco_freq; + + /* Determine P0, P1 or P2 */ + for (dco_count = 0; dco_count < 3; dco_count++) { + found = false; + candidate_p = + div64_u64(dco_central_freq[dco_count], afe_clock); + if (retry_with_odd == false) + candidate_p = (candidate_p % 2 == 0 ? 
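/*
 * The final link_clock *= 2 in skl_ddi_clock_get() reflects that the
 * CTRL1 link-rate names are half the DP port clock: RATE_810 decodes
 * to 162000, RATE_1350 to 270000 and RATE_2700 to 540000 kHz, i.e. the
 * standard 1.62/2.7/5.4 GHz DP link rates (compare the encoder side in
 * skl_edp_set_pll_config() in intel_dp.c below).
 */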
+ candidate_p : candidate_p + 1); + + for (P1 = 1; P1 < candidate_p; P1++) { + for (i = 0; i < 4; i++) { + if (!(P0[i] != 1 || P1 == 1)) + continue; + + for (k = 0; k < 4; k++) { + if (P1 != 1 && P2[k] != 2) + continue; + + if (candidate_p == P0[i] * P1 * P2[k]) { + /* Found possible P0, P1, P2 */ + found = true; + candidate_p0[dco_count] = P0[i]; + candidate_p1[dco_count] = P1; + candidate_p2[dco_count] = P2[k]; + goto found; + } + + } + } + } + +found: + if (found) { + dco_central_freq_deviation[dco_count] = + div64_u64(10000 * + abs_diff((candidate_p * afe_clock), + dco_central_freq[dco_count]), + dco_central_freq[dco_count]); + + if (dco_central_freq_deviation[dco_count] < + min_dco_deviation) { + min_dco_deviation = + dco_central_freq_deviation[dco_count]; + min_dco_index = dco_count; + } + } + + if (min_dco_index > 2 && dco_count == 2) { + retry_with_odd = true; + dco_count = 0; + } + } + + if (min_dco_index > 2) { + WARN(1, "No valid values found for the given pixel clock\n"); + } else { + wrpll_params->central_freq = dco_central_freq[min_dco_index]; + + switch (dco_central_freq[min_dco_index]) { + case 9600000000ULL: + wrpll_params->central_freq = 0; + break; + case 9000000000ULL: + wrpll_params->central_freq = 1; + break; + case 8400000000ULL: + wrpll_params->central_freq = 3; + } + + switch (candidate_p0[min_dco_index]) { + case 1: + wrpll_params->pdiv = 0; + break; + case 2: + wrpll_params->pdiv = 1; + break; + case 3: + wrpll_params->pdiv = 2; + break; + case 7: + wrpll_params->pdiv = 4; + break; + default: + WARN(1, "Incorrect PDiv\n"); + } + + switch (candidate_p2[min_dco_index]) { + case 5: + wrpll_params->kdiv = 0; + break; + case 2: + wrpll_params->kdiv = 1; + break; + case 3: + wrpll_params->kdiv = 2; + break; + case 1: + wrpll_params->kdiv = 3; + break; + default: + WARN(1, "Incorrect KDiv\n"); + } + + wrpll_params->qdiv_ratio = candidate_p1[min_dco_index]; + wrpll_params->qdiv_mode = + (wrpll_params->qdiv_ratio == 1) ? 0 : 1; + + dco_freq = candidate_p0[min_dco_index] * + candidate_p1[min_dco_index] * + candidate_p2[min_dco_index] * afe_clock; + + /* + * Intermediate values are in Hz. + * Divide by MHz to match bspec + */ + wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1))); + wrpll_params->dco_fraction = + div_u64(((div_u64(dco_freq, 24) - + wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1)); + + } +} + + +static bool +skl_ddi_pll_select(struct intel_crtc *intel_crtc, + struct intel_encoder *intel_encoder, + int clock) +{ + struct intel_shared_dpll *pll; + uint32_t ctrl1, cfgcr1, cfgcr2; + + /* + * See comment in intel_dpll_hw_state to understand why we always use 0 + * as the DPLL id in this function. 
+ */ + + ctrl1 = DPLL_CTRL1_OVERRIDE(0); + + if (intel_encoder->type == INTEL_OUTPUT_HDMI) { + struct skl_wrpll_params wrpll_params = { 0, }; + + ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); + + skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params); + + cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | + DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | + wrpll_params.dco_integer; + + cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | + DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | + wrpll_params.central_freq; + } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + struct drm_encoder *encoder = &intel_encoder->base; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + switch (intel_dp->link_bw) { + case DP_LINK_BW_1_62: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0); + break; + case DP_LINK_BW_2_7: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0); + break; + case DP_LINK_BW_5_4: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0); + break; + } + + cfgcr1 = cfgcr2 = 0; + } else /* eDP */ + return true; + + intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1; + intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1; + intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2; + + pll = intel_get_shared_dpll(intel_crtc); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(intel_crtc->pipe)); + return false; + } + + /* shared DPLL id 0 is DPLL 1 */ + intel_crtc->new_config->ddi_pll_sel = pll->id + 1; + + return true; +} /* * Tries to find a *shared* PLL for the CRTC and store it in @@ -838,11 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, */ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) { + struct drm_device *dev = intel_crtc->base.dev; struct intel_encoder *intel_encoder = intel_ddi_get_crtc_new_encoder(intel_crtc); int clock = intel_crtc->new_config->port_clock; - return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock); + if (IS_SKYLAKE(dev)) + return skl_ddi_pll_select(intel_crtc, intel_encoder, clock); + else + return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock); } void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) @@ -1134,7 +1465,8 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; @@ -1144,8 +1476,42 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_edp_panel_on(intel_dp); } - WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel); + if (IS_SKYLAKE(dev)) { + uint32_t dpll = crtc->config.ddi_pll_sel; + uint32_t val; + + /* + * DPLL0 is used for eDP and is the only "private" DPLL (as + * opposed to shared) on SKL + */ + if (type == INTEL_OUTPUT_EDP) { + WARN_ON(dpll != SKL_DPLL0); + + val = I915_READ(DPLL_CTRL1); + + val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | + DPLL_CTRL1_SSC(dpll) | + DPLL_CRTL1_LINK_RATE_MASK(dpll)); + val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6); + + I915_WRITE(DPLL_CTRL1, val); + POSTING_READ(DPLL_CTRL1); + } + + /* DDI -> PLL mapping */ + val 
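/*
 * DPLL_CTRL2 gives every DDI port a three-bit routing field, bit
 * (port*3) being the select override and bits (port*3+1..2) the DPLL
 * number, plus a separate clock-off bit at (port+15). The write below,
 * worked out for port B (PORT_B == 1) routed to DPLL 2:
 *
 *   DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B) == 1 << 3
 *   DPLL_CTRL2_DDI_CLK_SEL(2, PORT_B)   == 2 << 4
 *   DPLL_CTRL2_DDI_CLK_OFF(PORT_B)      == 1 << 16
 */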
= I915_READ(DPLL_CTRL2); + + val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) | + DPLL_CTRL2_DDI_CLK_SEL_MASK(port)); + val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) | + DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); + + I915_WRITE(DPLL_CTRL2, val); + + } else { + WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE); + I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel); + } if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1155,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); - if (port != PORT_A) + if (port != PORT_A || INTEL_INFO(dev)->gen >= 9) intel_dp_stop_link_train(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); @@ -1169,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; uint32_t val; @@ -1197,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) intel_edp_panel_off(intel_dp); } - I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); + if (IS_SKYLAKE(dev)) + I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | + DPLL_CTRL2_DDI_CLK_OFF(port))); + else + I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); } static void intel_enable_ddi(struct intel_encoder *intel_encoder) @@ -1224,11 +1595,11 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (port == PORT_A) + if (port == PORT_A && INTEL_INFO(dev)->gen < 9) intel_dp_stop_link_train(intel_dp); intel_edp_backlight_on(intel_dp); - intel_edp_psr_enable(intel_dp); + intel_psr_enable(intel_dp); } if (intel_crtc->config.has_audio) { @@ -1254,11 +1625,59 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - intel_edp_psr_disable(intel_dp); + intel_psr_disable(intel_dp); intel_edp_backlight_off(intel_dp); } } +static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv) +{ + uint32_t lcpll1 = I915_READ(LCPLL1_CTL); + uint32_t cdctl = I915_READ(CDCLK_CTL); + uint32_t linkrate; + + if (!(lcpll1 & LCPLL_PLL_ENABLE)) { + WARN(1, "LCPLL1 not enabled\n"); + return 24000; /* 24MHz is the cd freq with NSSC ref */ + } + + if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) + return 540000; + + linkrate = (I915_READ(DPLL_CTRL1) & + DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; + + if (linkrate == DPLL_CRTL1_LINK_RATE_2160 || + linkrate == DPLL_CRTL1_LINK_RATE_1080) { + /* vco 8640 */ + switch (cdctl & CDCLK_FREQ_SEL_MASK) { + case CDCLK_FREQ_450_432: + return 432000; + case CDCLK_FREQ_337_308: + return 308570; + case CDCLK_FREQ_675_617: + return 617140; + default: + WARN(1, "Unknown cd freq selection\n"); + } + } else { + /* vco 8100 */ + switch (cdctl & CDCLK_FREQ_SEL_MASK) { + case CDCLK_FREQ_450_432: + return 450000; + case CDCLK_FREQ_337_308: + return 337500; + case CDCLK_FREQ_675_617: + return 675000; + default: + WARN(1, 
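/*
 * For reference, every decoded CDCLK here is an integer divide of the
 * DPLL0 VCO inferred from its link rate: the 8640 MHz VCO gives
 * 8640/20 = 432, 8640/28 ~= 308.57 and 8640/14 ~= 617.14 MHz, while
 * the 8100 MHz VCO gives 8100/18 = 450, 8100/24 = 337.5 and
 * 8100/12 = 675 MHz; the 540 MHz selection is handled up front either
 * way.
 */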
"Unknown cd freq selection\n"); + } + } + + /* error case, do as if DPLL0 isn't enabled */ + return 24000; +} + static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv) { uint32_t lcpll = I915_READ(LCPLL_CTL); @@ -1300,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + if (IS_SKYLAKE(dev)) + return skl_get_cdclk_freq(dev_priv); + if (IS_BROADWELL(dev)) return bdw_get_cdclk_freq(dev_priv); @@ -1361,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) } } +static const char * const skl_ddi_pll_names[] = { + "DPLL 1", + "DPLL 2", + "DPLL 3", +}; + +struct skl_dpll_regs { + u32 ctl, cfgcr1, cfgcr2; +}; + +/* this array is indexed by the *shared* pll id */ +static const struct skl_dpll_regs skl_dpll_regs[3] = { + { + /* DPLL 1 */ + .ctl = LCPLL2_CTL, + .cfgcr1 = DPLL1_CFGCR1, + .cfgcr2 = DPLL1_CFGCR2, + }, + { + /* DPLL 2 */ + .ctl = WRPLL_CTL1, + .cfgcr1 = DPLL2_CFGCR1, + .cfgcr2 = DPLL2_CFGCR2, + }, + { + /* DPLL 3 */ + .ctl = WRPLL_CTL2, + .cfgcr1 = DPLL3_CFGCR1, + .cfgcr2 = DPLL3_CFGCR2, + }, +}; + +static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + unsigned int dpll; + const struct skl_dpll_regs *regs = skl_dpll_regs; + + /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ + dpll = pll->id + 1; + + val = I915_READ(DPLL_CTRL1); + + val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) | + DPLL_CRTL1_LINK_RATE_MASK(dpll)); + val |= pll->config.hw_state.ctrl1 << (dpll * 6); + + I915_WRITE(DPLL_CTRL1, val); + POSTING_READ(DPLL_CTRL1); + + I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1); + I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2); + POSTING_READ(regs[pll->id].cfgcr1); + POSTING_READ(regs[pll->id].cfgcr2); + + /* the enable bit is always bit 31 */ + I915_WRITE(regs[pll->id].ctl, + I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); + + if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5)) + DRM_ERROR("DPLL %d not locked\n", dpll); +} + +static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + const struct skl_dpll_regs *regs = skl_dpll_regs; + + /* the enable bit is always bit 31 */ + I915_WRITE(regs[pll->id].ctl, + I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); + POSTING_READ(regs[pll->id].ctl); +} + +static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + unsigned int dpll; + const struct skl_dpll_regs *regs = skl_dpll_regs; + + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ + dpll = pll->id + 1; + + val = I915_READ(regs[pll->id].ctl); + if (!(val & LCPLL_PLL_ENABLE)) + return false; + + val = I915_READ(DPLL_CTRL1); + hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; + + /* avoid reading back stale values if HDMI mode is not enabled */ + if (val & DPLL_CTRL1_HDMI_MODE(dpll)) { + hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); + hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); + } + + return true; +} + +static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) +{ + int i; + + dev_priv->num_shared_dpll = 3; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + dev_priv->shared_dplls[i].id = i; + dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i]; + 
dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable; + dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable; + dev_priv->shared_dplls[i].get_hw_state = + skl_ddi_pll_get_hw_state; + } +} + void intel_ddi_pll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t val = I915_READ(LCPLL_CTL); - hsw_shared_dplls_init(dev_priv); - - /* The LCPLL register should be turned on by the BIOS. For now let's - * just check its state and print errors in case something is wrong. - * Don't even try to turn it on. - */ + if (IS_SKYLAKE(dev)) + skl_shared_dplls_init(dev_priv); + else + hsw_shared_dplls_init(dev_priv); DRM_DEBUG_KMS("CDCLK running at %dKHz\n", intel_ddi_get_cdclk_freq(dev_priv)); - if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); + if (IS_SKYLAKE(dev)) { + if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) + DRM_ERROR("LCPLL1 is disabled\n"); + } else { + /* + * The LCPLL register should be turned on by the BIOS. For now + * let's just check its state and print errors in case + * something is wrong. Don't even try to turn it on. + */ + + if (val & LCPLL_CD_SOURCE_FCLK) + DRM_ERROR("CDCLK source is not LCPLL\n"); - if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); + if (val & LCPLL_PLL_DISABLE) + DRM_ERROR("LCPLL is disabled\n"); + } } void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) @@ -1475,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + struct intel_hdmi *intel_hdmi; u32 temp, flags = 0; + struct drm_device *dev = dev_priv->dev; temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) @@ -1509,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder, switch (temp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: pipe_config->has_hdmi_sink = true; + intel_hdmi = enc_to_intel_hdmi(&encoder->base); + + if (intel_hdmi->infoframe_enabled(&encoder->base)) + pipe_config->has_infoframe = true; + break; case TRANS_DDI_MODE_SELECT_DVI: case TRANS_DDI_MODE_SELECT_FDI: break; @@ -1547,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; } - hsw_ddi_clock_get(encoder, pipe_config); + if (INTEL_INFO(dev)->gen <= 8) + hsw_ddi_clock_get(encoder, pipe_config); + else + skl_ddi_clock_get(encoder, pipe_config); } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 947144985700..e2af1383b179 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2765,25 +2765,10 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return 0; } -void intel_display_handle_reset(struct drm_device *dev) +static void intel_complete_page_flips(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - /* - * Flips in the rings have been nuked by the reset, - * so complete all pending flips so that user space - * will get its events and not get stuck. - * - * Also update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. 
- * - * Need to make two loops over the crtcs so that we - * don't try to grab a crtc mutex before the - * pending_flip_queue really got woken up. - */ - for_each_crtc(dev, crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum plane plane = intel_crtc->plane; @@ -2791,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev) intel_prepare_page_flip(dev, plane); intel_finish_page_flip_plane(dev, plane); } +} + +static void intel_update_primary_planes(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; for_each_crtc(dev, crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -2810,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev) } } +void intel_prepare_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc; + + /* no reset support for gen2 */ + if (IS_GEN2(dev)) + return; + + /* reset doesn't touch the display */ + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + return; + + drm_modeset_lock_all(dev); + + /* + * Disabling the crtcs gracefully seems nicer. Also the + * g33 docs say we should at least disable all the planes. + */ + for_each_intel_crtc(dev, crtc) { + if (crtc->active) + dev_priv->display.crtc_disable(&crtc->base); + } +} + +void intel_finish_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + /* + * Flips in the rings will be nuked by the reset, + * so complete all pending flips so that user space + * will get its events and not get stuck. + */ + intel_complete_page_flips(dev); + + /* no reset support for gen2 */ + if (IS_GEN2(dev)) + return; + + /* reset doesn't touch the display */ + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + /* + * Flips in the rings have been nuked by the reset, + * so update the base address of all primary + * planes to the last fb to make sure we're + * showing the correct fb after a reset. + */ + intel_update_primary_planes(dev); + return; + } + + /* + * The display has been reset as well, + * so we need a full re-initialization. 
+ */ + intel_runtime_pm_disable_interrupts(dev_priv); + intel_runtime_pm_enable_interrupts(dev_priv); + + intel_modeset_init_hw(dev); + + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irq(&dev_priv->irq_lock); + + intel_modeset_setup_hw_state(dev, true); + + intel_hpd_init(dev_priv); + + drm_modeset_unlock_all(dev); +} + static int intel_finish_fb(struct drm_framebuffer *old_fb) { @@ -2931,8 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - intel_update_pipe_size(intel_crtc); - dev_priv->display.update_primary_plane(crtc, fb, x, y); if (intel_crtc->active) @@ -4005,6 +4067,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) } } +static void skylake_pfit_enable(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + + if (crtc->config.pch_pfit.enabled) { + I915_WRITE(PS_CTL(pipe), PS_ENABLE); + I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos); + I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size); + } +} + static void ironlake_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -4388,7 +4463,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_ddi_enable_pipe_clock(intel_crtc); - ironlake_pfit_enable(intel_crtc); + if (IS_SKYLAKE(dev)) + skylake_pfit_enable(intel_crtc); + else + ironlake_pfit_enable(intel_crtc); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -4422,6 +4500,21 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc_enable_planes(crtc); } +static void skylake_pfit_disable(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + + /* To avoid upsetting the power well on haswell only disable the pfit if + * it's in use. The hw state code will make sure we get this right. */ + if (crtc->config.pch_pfit.enabled) { + I915_WRITE(PS_CTL(pipe), 0); + I915_WRITE(PS_WIN_POS(pipe), 0); + I915_WRITE(PS_WIN_SZ(pipe), 0); + } +} + static void ironlake_pfit_disable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -4472,7 +4565,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_disable(crtc); ironlake_disable_pch_transcoder(dev_priv, pipe); - intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ @@ -4534,14 +4626,15 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); - ironlake_pfit_disable(intel_crtc); + if (IS_SKYLAKE(dev)) + skylake_pfit_disable(intel_crtc); + else + ironlake_pfit_disable(intel_crtc); intel_ddi_disable_pipe_clock(intel_crtc); if (intel_crtc->config.has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); - intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, - true); intel_ddi_fdi_disable(crtc); } @@ -4907,10 +5000,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); if (req_cdclk != dev_priv->vlv_cdclk_freq) { + /* + * FIXME: We can end up here with all power domains off, yet + * with a CDCLK frequency other than the minimum. To account + * for this take the PIPE-A power domain, which covers the HW + * blocks needed for the following programming. 
This can be + * removed once it's guaranteed that we get here either with + * the minimum CDCLK set, or the required power domains + * enabled. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + if (IS_CHERRYVIEW(dev)) cherryview_set_cdclk(dev, req_cdclk); else valleyview_set_cdclk(dev, req_cdclk); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } } @@ -5153,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc) { } -static void intel_crtc_update_sarea(struct drm_crtc *crtc, - bool enabled) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_master_private *master_priv; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - - if (!dev->primary->master) - return; - - master_priv = dev->primary->master->driver_priv; - if (!master_priv->sarea_priv) - return; - - switch (pipe) { - case 0: - master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; - master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; - break; - case 1: - master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; - master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; - break; - default: - DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); - break; - } -} - /* Master function to enable/disable CRTC and corresponding power wells */ void intel_crtc_control(struct drm_crtc *crtc, bool enable) { @@ -5226,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc) enable |= intel_encoder->connectors_active; intel_crtc_control(crtc, enable); - - intel_crtc_update_sarea(crtc, enable); } static void intel_crtc_disable(struct drm_crtc *crtc) @@ -5242,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) WARN_ON(!crtc->enabled); dev_priv->display.crtc_disable(crtc); - intel_crtc_update_sarea(crtc, false); dev_priv->display.off(crtc); if (crtc->primary->fb) { @@ -7549,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, &pipe_config->fdi_m_n, NULL); } +static void skylake_get_pfit_config(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(PS_CTL(crtc->pipe)); + + if (tmp & PS_ENABLE) { + pipe_config->pch_pfit.enabled = true; + pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe)); + pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe)); + } +} + static void ironlake_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -7962,6 +8051,28 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc) return 0; } +static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, + enum port port, + struct intel_crtc_config *pipe_config) +{ + u32 temp; + + temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); + pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); + + switch (pipe_config->ddi_pll_sel) { + case SKL_DPLL1: + pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; + break; + case SKL_DPLL2: + pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2; + break; + case SKL_DPLL3: + pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3; + break; + } +} + static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_config *pipe_config) @@ -7991,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - haswell_get_ddi_pll(dev_priv, port, pipe_config); + if 
(IS_SKYLAKE(dev)) + skylake_get_ddi_pll(dev_priv, port, pipe_config); + else + haswell_get_ddi_pll(dev_priv, port, pipe_config); if (pipe_config->shared_dpll >= 0) { pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; @@ -8067,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - if (intel_display_power_is_enabled(dev_priv, pfit_domain)) - ironlake_get_pfit_config(crtc, pipe_config); + if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { + if (IS_SKYLAKE(dev)) + skylake_get_pfit_config(crtc, pipe_config); + else + ironlake_get_pfit_config(crtc, pipe_config); + } if (IS_HASWELL(dev)) pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && @@ -8292,7 +8410,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; unsigned old_width; @@ -8421,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev, intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { - drm_gem_object_unreference_unlocked(&obj->base); + drm_gem_object_unreference(&obj->base); return ERR_PTR(-ENOMEM); } @@ -8431,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev, return &intel_fb->base; err: - drm_gem_object_unreference_unlocked(&obj->base); + drm_gem_object_unreference(&obj->base); kfree(intel_fb); return ERR_PTR(ret); @@ -9066,6 +9184,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + if (i915_reset_in_progress(&dev_priv->gpu_error) || + crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + return true; + /* * The relevant registers doen't exist on pre-ctg. 
* As the flip done interrupt doesn't trigger for mmio @@ -9461,6 +9583,69 @@ static int intel_queue_mmio_flip(struct drm_device *dev, return 0; } +static int intel_gen9_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring, + uint32_t flags) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t plane = 0, stride; + int ret; + + switch(intel_crtc->pipe) { + case PIPE_A: + plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A; + break; + case PIPE_B: + plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B; + break; + case PIPE_C: + plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C; + break; + default: + WARN_ONCE(1, "unknown plane in flip command\n"); + return -ENODEV; + } + + switch (obj->tiling_mode) { + case I915_TILING_NONE: + stride = fb->pitches[0] >> 6; + break; + case I915_TILING_X: + stride = fb->pitches[0] >> 9; + break; + default: + WARN_ONCE(1, "unknown tiling in flip command\n"); + return -ENODEV; + } + + ret = intel_ring_begin(ring, 10); + if (ret) + return ret; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, DERRMR); + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + DERRMR_PIPEB_PRI_FLIP_DONE | + DERRMR_PIPEC_PRI_FLIP_DONE)); + intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | + MI_SRM_LRM_GLOBAL_GTT); + intel_ring_emit(ring, DERRMR); + intel_ring_emit(ring, ring->scratch.gtt_offset + 256); + intel_ring_emit(ring, 0); + + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane); + intel_ring_emit(ring, stride << 6 | obj->tiling_mode); + intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); + + intel_mark_page_flip_active(intel_crtc); + __intel_ring_advance(ring); + + return 0; +} + static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9900,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dp_m2_n2.link_n, pipe_config->dp_m2_n2.tu); + DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", + pipe_config->has_audio, + pipe_config->has_infoframe); + DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->requested_mode); DRM_DEBUG_KMS("adjusted mode:\n"); @@ -9961,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc) return true; } +static bool check_digital_port_conflicts(struct drm_device *dev) +{ + struct intel_connector *connector; + unsigned int used_ports = 0; + + /* + * Walk the connector list instead of the encoder + * list to detect the problem on ddi platforms + * where there's just one encoder per digital port. 
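 * (A concrete example: on such platforms the HDMI and DP connectors of
 * port B are backed by the same encoder, so if a modeset picked both at
 * once each would claim the same bit in used_ports below and the
 * combination is rejected as conflicting.)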
+ */ + list_for_each_entry(connector, + &dev->mode_config.connector_list, base.head) { + struct intel_encoder *encoder = connector->new_encoder; + + if (!encoder) + continue; + + WARN_ON(!encoder->new_crtc); + + switch (encoder->type) { + unsigned int port_mask; + case INTEL_OUTPUT_UNKNOWN: + if (WARN_ON(!HAS_DDI(dev))) + break; + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_HDMI: + case INTEL_OUTPUT_EDP: + port_mask = 1 << enc_to_dig_port(&encoder->base)->port; + + /* the same port mustn't appear more than once */ + if (used_ports & port_mask) + return false; + + used_ports |= port_mask; + default: + break; + } + } + + return true; +} + static struct intel_crtc_config * intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9977,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, return ERR_PTR(-EINVAL); } + if (!check_digital_port_conflicts(dev)) { + DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); + return ERR_PTR(-EINVAL); + } + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); if (!pipe_config) return ERR_PTR(-ENOMEM); @@ -10368,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev, if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || IS_VALLEYVIEW(dev)) PIPE_CONF_CHECK_I(limited_color_range); + PIPE_CONF_CHECK_I(has_infoframe); PIPE_CONF_CHECK_I(has_audio); @@ -10424,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); + PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); + PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); + PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) PIPE_CONF_CHECK_I(pipe_bpp); @@ -10747,44 +10987,61 @@ static void update_scanline_offset(struct intel_crtc *crtc) crtc->scanline_offset = 1; } +static struct intel_crtc_config * +intel_modeset_compute_config(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_framebuffer *fb, + unsigned *modeset_pipes, + unsigned *prepare_pipes, + unsigned *disable_pipes) +{ + struct intel_crtc_config *pipe_config = NULL; + + intel_modeset_affected_pipes(crtc, modeset_pipes, + prepare_pipes, disable_pipes); + + if ((*modeset_pipes) == 0) + goto out; + + /* + * Note this needs changes when we start tracking multiple modes + * and crtcs. At that point we'll need to compute the whole config + * (i.e. one pipe_config for each crtc) rather than just the one + * for this crtc. 
+ */ + pipe_config = intel_modeset_pipe_config(crtc, fb, mode); + if (IS_ERR(pipe_config)) { + goto out; + } + intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, + "[modeset]"); + +out: + return pipe_config; +} + static int __intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) + int x, int y, struct drm_framebuffer *fb, + struct intel_crtc_config *pipe_config, + unsigned modeset_pipes, + unsigned prepare_pipes, + unsigned disable_pipes) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *saved_mode; - struct intel_crtc_config *pipe_config = NULL; struct intel_crtc *intel_crtc; - unsigned disable_pipes, prepare_pipes, modeset_pipes; int ret = 0; saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); if (!saved_mode) return -ENOMEM; - intel_modeset_affected_pipes(crtc, &modeset_pipes, - &prepare_pipes, &disable_pipes); - *saved_mode = crtc->mode; - /* Hack: Because we don't (yet) support global modeset on multiple - * crtcs, we don't keep track of the new mode for more than one crtc. - * Hence simply check whether any bit is set in modeset_pipes in all the - * pieces of code that are not yet converted to deal with mutliple crtcs - * changing their mode at the same time. */ - if (modeset_pipes) { - pipe_config = intel_modeset_pipe_config(crtc, fb, mode); - if (IS_ERR(pipe_config)) { - ret = PTR_ERR(pipe_config); - pipe_config = NULL; - - goto out; - } - intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, - "[modeset]"); + if (modeset_pipes) to_intel_crtc(crtc)->new_config = pipe_config; - } /* * See if the config requires any additional preparation, e.g. @@ -10826,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc, /* crtc->mode is already used by the ->mode_set callbacks, hence we need * to set it here already despite that we pass it down the callchain. + * + * Note we'll need to fix this up when we start tracking multiple + * pipes; here we assume a single modeset_pipe and only track the + * single crtc and mode. 
*/ if (modeset_pipes) { crtc->mode = *mode; @@ -10887,19 +11148,23 @@ done: if (ret && crtc->enabled) crtc->mode = *saved_mode; -out: kfree(pipe_config); kfree(saved_mode); return ret; } -static int intel_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) +static int intel_set_mode_pipes(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb, + struct intel_crtc_config *pipe_config, + unsigned modeset_pipes, + unsigned prepare_pipes, + unsigned disable_pipes) { int ret; - ret = __intel_set_mode(crtc, mode, x, y, fb); + ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes, + prepare_pipes, disable_pipes); if (ret == 0) intel_modeset_check_state(crtc->dev); @@ -10907,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc, return ret; } +static int intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) +{ + struct intel_crtc_config *pipe_config; + unsigned modeset_pipes, prepare_pipes, disable_pipes; + + pipe_config = intel_modeset_compute_config(crtc, mode, fb, + &modeset_pipes, + &prepare_pipes, + &disable_pipes); + + if (IS_ERR(pipe_config)) + return PTR_ERR(pipe_config); + + return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, + modeset_pipes, prepare_pipes, + disable_pipes); +} + void intel_crtc_restore_mode(struct drm_crtc *crtc) { intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); @@ -11235,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set) struct drm_device *dev; struct drm_mode_set save_set; struct intel_set_config *config; + struct intel_crtc_config *pipe_config; + unsigned modeset_pipes, prepare_pipes, disable_pipes; int ret; BUG_ON(!set); @@ -11280,9 +11567,38 @@ static int intel_crtc_set_config(struct drm_mode_set *set) if (ret) goto fail; + pipe_config = intel_modeset_compute_config(set->crtc, set->mode, + set->fb, + &modeset_pipes, + &prepare_pipes, + &disable_pipes); + if (IS_ERR(pipe_config)) { + ret = PTR_ERR(pipe_config); + goto fail; + } else if (pipe_config) { + if (pipe_config->has_audio != + to_intel_crtc(set->crtc)->config.has_audio) + config->mode_changed = true; + + /* + * Note we have an issue here with infoframes: current code + * only updates them on the full mode set path per hw + * requirements. So here we should be checking for any + * required changes and forcing a mode set. 
+ */ + } + + /* set_mode will free it in the mode_changed case */ + if (!config->mode_changed) + kfree(pipe_config); + + intel_update_pipe_size(to_intel_crtc(set->crtc)); + if (config->mode_changed) { - ret = intel_set_mode(set->crtc, set->mode, - set->x, set->y, set->fb); + ret = intel_set_mode_pipes(set->crtc, set->mode, + set->x, set->y, set->fb, pipe_config, + modeset_pipes, prepare_pipes, + disable_pipes); } else if (config->fb_changed) { struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); @@ -11555,8 +11871,8 @@ intel_commit_primary_plane(struct drm_plane *plane, struct drm_rect *src = &state->src; crtc->primary->fb = fb; - crtc->x = src->x1; - crtc->y = src->y1; + crtc->x = src->x1 >> 16; + crtc->y = src->y1 >> 16; intel_plane->crtc_x = state->orig_dst.x1; intel_plane->crtc_y = state->orig_dst.y1; @@ -12005,7 +12321,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - if (!encoder) + if (!encoder || WARN_ON(!encoder->crtc)) return INVALID_PIPE; return to_intel_crtc(encoder->crtc)->pipe; @@ -12240,7 +12556,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); - intel_edp_psr_init(dev); + intel_psr_init(dev); for_each_intel_encoder(dev, encoder) { encoder->base.possible_crtcs = encoder->crtc_mask; @@ -12554,6 +12870,9 @@ static void intel_init_display(struct drm_device *dev) case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ dev_priv->display.queue_flip = intel_gen7_queue_flip; break; + case 9: + dev_priv->display.queue_flip = intel_gen9_queue_flip; + break; } intel_panel_init_backlight_funcs(dev); @@ -12738,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev) vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); udelay(300); - /* - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming - * from S3 without preserving (some of?) the other bits. - */ - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); + I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } @@ -12827,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev) intel_shared_dpll_init(dev); - /* save the BIOS value before clobbering it */ - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); @@ -13258,8 +13571,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, - crtc->primary->fb); + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, + crtc->primary->fb); } } else { intel_modeset_update_staged_output_state(dev); @@ -13270,6 +13583,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct drm_i915_gem_object *obj; @@ -13277,6 +13591,16 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev); mutex_unlock(&dev->struct_mutex); + /* + * There may be no VBT; and if the BIOS enabled SSC we can + * just keep using it to avoid unnecessary flicker. Whereas if the + * BIOS isn't using it, don't assume it will work even if the VBT + * indicates as much. 
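The crtc->x/y change in intel_commit_primary_plane() is a units fix: the plane source rectangle is tracked in 16.16 fixed point, so the integer pixel offset is the coordinate shifted right by 16. A self-contained illustration of the convention:

#include <assert.h>
#include <stdint.h>

/* convert a 16.16 fixed-point coordinate to an integer pixel offset */
static int32_t fixed16_to_int(int32_t v)
{
        return v >> 16;
}

/* build a 16.16 value from an integer part, for testing */
static int32_t int_to_fixed16(int32_t v)
{
        return v << 16;
}

int main(void)
{
        assert(fixed16_to_int(int_to_fixed16(640)) == 640);
        /* the fractional part is truncated, exactly like src->x1 >> 16 */
        assert(fixed16_to_int(int_to_fixed16(640) + 0x8000) == 640);
        return 0;
}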
+ */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & + DREF_SSC1_ENABLE); + intel_modeset_init_hw(dev); intel_setup_overlay(dev); @@ -13302,6 +13626,8 @@ void intel_modeset_gem_init(struct drm_device *dev) } } mutex_unlock(&dev->struct_mutex); + + intel_backlight_register(dev); } void intel_connector_unregister(struct intel_connector *intel_connector) @@ -13317,9 +13643,13 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; + intel_disable_gt_powersave(dev); + + intel_backlight_unregister(dev); + /* * Interrupts and polling as the first thing to avoid creating havoc. - * Too much stuff here (turning of rps, connectors, ...) would + * Too much stuff here (turning of connectors, ...) would * experience fancy races otherwise. */ intel_irq_uninstall(dev_priv); @@ -13336,8 +13666,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_disable_fbc(dev); - intel_disable_gt_powersave(dev); - ironlake_teardown_rc6(dev); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ceb528f07c25..5cecc20efa71 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -227,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_OK; } -static uint32_t -pack_aux(const uint8_t *src, int src_bytes) +uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) { int i; uint32_t v = 0; @@ -240,8 +239,7 @@ pack_aux(const uint8_t *src, int src_bytes) return v; } -static void -unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -863,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) I915_WRITE(ch_data + i, - pack_aux(send + i, send_bytes - i)); + intel_dp_pack_aux(send + i, + send_bytes - i)); /* Send the command and wait for it to complete */ I915_WRITE(ch_ctl, send_ctl); @@ -917,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - unpack_aux(I915_READ(ch_data + i), - recv + i, recv_bytes - i); + intel_dp_unpack_aux(I915_READ(ch_data + i), + recv + i, recv_bytes - i); ret = recv_bytes; out: @@ -1075,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) } static void +skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw) +{ + u32 ctrl1; + + pipe_config->ddi_pll_sel = SKL_DPLL0; + pipe_config->dpll_hw_state.cfgcr1 = 0; + pipe_config->dpll_hw_state.cfgcr2 = 0; + + ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); + switch (link_bw) { + case DP_LINK_BW_1_62: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, + SKL_DPLL0); + break; + case DP_LINK_BW_2_7: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, + SKL_DPLL0); + break; + case DP_LINK_BW_5_4: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, + SKL_DPLL0); + break; + } + pipe_config->dpll_hw_state.ctrl1 = ctrl1; +} + +static void hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw) { switch (link_bw) { @@ -1251,7 +1277,9 @@ found: &pipe_config->dp_m2_n2); } - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_SKYLAKE(dev) && is_edp(intel_dp)) + skl_edp_set_pll_config(pipe_config, intel_dp->link_bw); + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 
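pack_aux()/unpack_aux() are promoted to intel_dp_pack_aux()/intel_dp_unpack_aux() so the PSR code, now split out of intel_dp.c, can keep staging byte messages into the 32-bit AUX data registers. The registers take the first byte of each group of four in the most significant position, which is why the transfer loops step by 4 and pass the remaining byte count. A standalone sketch of that packing, written to the same MSB-first convention:

#include <assert.h>
#include <stdint.h>

/* pack up to 4 message bytes into one register word, first byte in MSB */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
}

/* inverse: spill a register word back into up to 4 message bytes */
static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
        const uint8_t msg[4] = { 0x10, 0x20, 0x30, 0x40 };
        uint8_t out[4];

        assert(pack_aux(msg, 4) == 0x10203040);
        unpack_aux(0x10203040, out, 4);
        assert(out[0] == 0x10 && out[3] == 0x40);
        return 0;
}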
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); else intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); @@ -1475,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return false; + cancel_delayed_work(&intel_dp->panel_vdd_work); intel_dp->want_panel_vdd = true; if (edp_have_panel_vdd(intel_dp)) @@ -2067,385 +2096,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } -static bool is_edp_psr(struct intel_dp *intel_dp) -{ - return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; -} - -static bool intel_edp_is_psr_enabled(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_PSR(dev)) - return false; - - return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; -} - -static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, - struct edp_vsc_psr *vsc_psr) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); - u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); - uint32_t *data = (uint32_t *) vsc_psr; - unsigned int i; - - /* As per BSPec (Pipe Video Data Island Packet), we need to disable - the video DIP being updated before program video DIP data buffer - registers for DIP being updated. */ - I915_WRITE(ctl_reg, 0); - POSTING_READ(ctl_reg); - - for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { - if (i < sizeof(struct edp_vsc_psr)) - I915_WRITE(data_reg + i, *data++); - else - I915_WRITE(data_reg + i, 0); - } - - I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); - POSTING_READ(ctl_reg); -} - -static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp) -{ - struct edp_vsc_psr psr_vsc; - - /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ - memset(&psr_vsc, 0, sizeof(psr_vsc)); - psr_vsc.sdp_header.HB0 = 0; - psr_vsc.sdp_header.HB1 = 0x7; - psr_vsc.sdp_header.HB2 = 0x2; - psr_vsc.sdp_header.HB3 = 0x8; - intel_edp_psr_write_vsc(intel_dp, &psr_vsc); -} - -static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t aux_clock_divider; - int precharge = 0x3; - bool only_standby = false; - static const uint8_t aux_msg[] = { - [0] = DP_AUX_NATIVE_WRITE << 4, - [1] = DP_SET_POWER >> 8, - [2] = DP_SET_POWER & 0xff, - [3] = 1 - 1, - [4] = DP_SET_POWER_D0, - }; - int i; - - BUILD_BUG_ON(sizeof(aux_msg) > 20); - - aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); - - if (IS_BROADWELL(dev) && dig_port->port != PORT_A) - only_standby = true; - - /* Enable PSR in sink */ - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); - else - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); - - /* Setup AUX registers */ - for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i, - pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); - - I915_WRITE(EDP_PSR_AUX_CTL(dev), - DP_AUX_CH_CTL_TIME_OUT_400us | - (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << 
DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); -} - -static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t max_sleep_time = 0x1f; - uint32_t idle_frames = 1; - uint32_t val = 0x0; - const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; - bool only_standby = false; - - if (IS_BROADWELL(dev) && dig_port->port != PORT_A) - only_standby = true; - - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { - val |= EDP_PSR_LINK_STANDBY; - val |= EDP_PSR_TP2_TP3_TIME_0us; - val |= EDP_PSR_TP1_TIME_0us; - val |= EDP_PSR_SKIP_AUX_EXIT; - val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; - } else - val |= EDP_PSR_LINK_DISABLE; - - I915_WRITE(EDP_PSR_CTL(dev), val | - (IS_BROADWELL(dev) ? 0 : link_entry_time) | - max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | - idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | - EDP_PSR_ENABLE); -} - -static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dig_port->base.base.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - lockdep_assert_held(&dev_priv->psr.lock); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); - - dev_priv->psr.source_ok = false; - - if (IS_HASWELL(dev) && dig_port->port != PORT_A) { - DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); - return false; - } - - if (!i915.enable_psr) { - DRM_DEBUG_KMS("PSR disable by flag\n"); - return false; - } - - /* Below limitations aren't valid for Broadwell */ - if (IS_BROADWELL(dev)) - goto out; - - if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & - S3D_ENABLE) { - DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); - return false; - } - - if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { - DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); - return false; - } - - out: - dev_priv->psr.source_ok = true; - return true; -} - -static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); - WARN_ON(dev_priv->psr.active); - lockdep_assert_held(&dev_priv->psr.lock); - - /* Enable/Re-enable PSR on the host */ - intel_edp_psr_enable_source(intel_dp); - - dev_priv->psr.active = true; -} - -void intel_edp_psr_enable(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_PSR(dev)) { - DRM_DEBUG_KMS("PSR not supported on this platform\n"); - return; - } - - if (!is_edp_psr(intel_dp)) { - DRM_DEBUG_KMS("PSR not supported by this panel\n"); - return; - } - - mutex_lock(&dev_priv->psr.lock); - if (dev_priv->psr.enabled) { - DRM_DEBUG_KMS("PSR already in use\n"); - goto unlock; - } - - if (!intel_edp_psr_match_conditions(intel_dp)) - goto unlock; - - dev_priv->psr.busy_frontbuffer_bits = 0; - - intel_edp_psr_setup_vsc(intel_dp); - - /* Avoid continuous PSR exit by masking memup and hpd */ - 
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); - - /* Enable PSR on the panel */ - intel_edp_psr_enable_sink(intel_dp); - - dev_priv->psr.enabled = intel_dp; -unlock: - mutex_unlock(&dev_priv->psr.lock); -} - -void intel_edp_psr_disable(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } - - if (dev_priv->psr.active) { - I915_WRITE(EDP_PSR_CTL(dev), - I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); - - /* Wait till PSR is idle */ - if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & - EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) - DRM_ERROR("Timed out waiting for PSR Idle State\n"); - - dev_priv->psr.active = false; - } else { - WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); - } - - dev_priv->psr.enabled = NULL; - mutex_unlock(&dev_priv->psr.lock); - - cancel_delayed_work_sync(&dev_priv->psr.work); -} - -static void intel_edp_psr_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), psr.work.work); - struct intel_dp *intel_dp = dev_priv->psr.enabled; - - /* We have to make sure PSR is ready for re-enable - * otherwise it keeps disabled until next full enable/disable cycle. - * PSR might take some time to get fully disabled - * and be ready for re-enable. - */ - if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & - EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { - DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); - return; - } - - mutex_lock(&dev_priv->psr.lock); - intel_dp = dev_priv->psr.enabled; - - if (!intel_dp) - goto unlock; - - /* - * The delayed work can race with an invalidate hence we need to - * recheck. Since psr_flush first clears this and then reschedules we - * won't ever miss a flush when bailing out here. 
- */ - if (dev_priv->psr.busy_frontbuffer_bits) - goto unlock; - - intel_edp_psr_do_enable(intel_dp); -unlock: - mutex_unlock(&dev_priv->psr.lock); -} - -static void intel_edp_psr_do_exit(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->psr.active) { - u32 val = I915_READ(EDP_PSR_CTL(dev)); - - WARN_ON(!(val & EDP_PSR_ENABLE)); - - I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); - - dev_priv->psr.active = false; - } - -} - -void intel_edp_psr_invalidate(struct drm_device *dev, - unsigned frontbuffer_bits) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - enum pipe pipe; - - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } - - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - intel_edp_psr_do_exit(dev); - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - - dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; - mutex_unlock(&dev_priv->psr.lock); -} - -void intel_edp_psr_flush(struct drm_device *dev, - unsigned frontbuffer_bits) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - enum pipe pipe; - - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } - - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; - - /* - * On Haswell sprite plane updates don't result in a psr invalidating - * signal in the hardware. Which means we need to manually fake this in - * software for all flushes, not just when we've seen a preceding - * invalidation through frontbuffer rendering. - */ - if (IS_HASWELL(dev) && - (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) - intel_edp_psr_do_exit(dev); - - if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) - schedule_delayed_work(&dev_priv->psr.work, - msecs_to_jiffies(100)); - mutex_unlock(&dev_priv->psr.lock); -} - -void intel_edp_psr_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work); - mutex_init(&dev_priv->psr.lock); -} - static void intel_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -4052,8 +3702,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count); if (attempts == 0) { - DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n"); - return -EIO; + DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n"); + return -ETIMEDOUT; } if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) @@ -4686,6 +4336,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. */ + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); @@ -5116,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) * hard to tell without seeing the user of this function of this code. * Check locking and ordering once that lands. 
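The invalidate/flush pair being moved out wholesale here (it reappears as intel_psr_invalidate()/intel_psr_flush()) implements a simple contract: an invalidate exits PSR and marks the touched frontbuffer bits busy, while a flush clears those bits and only re-arms PSR once no bits remain. A condensed model of that bookkeeping, with psr.lock and the delayed re-enable work reduced to direct calls:

#include <assert.h>
#include <stdbool.h>

/* condensed stand-in for dev_priv->psr */
static struct {
        bool enabled;                   /* feature committed on this panel */
        bool active;                    /* hardware currently in PSR */
        unsigned busy_frontbuffer_bits;
} psr = { .enabled = true, .active = true };

/* frontbuffer is about to be written: leave PSR, remember what is dirty */
static void psr_invalidate(unsigned frontbuffer_bits)
{
        if (!psr.enabled)
                return;
        psr.active = false;                     /* the do_exit step */
        psr.busy_frontbuffer_bits |= frontbuffer_bits;
}

/* writes have landed: drop busy bits, re-enter PSR once all are clear */
static void psr_flush(unsigned frontbuffer_bits)
{
        if (!psr.enabled)
                return;
        psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
        if (!psr.active && !psr.busy_frontbuffer_bits)
                psr.active = true;              /* the delayed re-enable */
}

int main(void)
{
        psr_invalidate(0x3);
        psr_flush(0x1);
        assert(!psr.active);    /* bit 1 still busy, stay out of PSR */
        psr_flush(0x2);
        assert(psr.active);     /* everything flushed, re-enabled */
        return 0;
}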
*/ - if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) { + if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) { DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); return; } @@ -5232,6 +4883,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, bool has_dpcd; struct drm_display_mode *scan; struct edid *edid; + enum pipe pipe = INVALID_PIPE; intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED; @@ -5300,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (IS_VALLEYVIEW(dev)) { intel_dp->edp_notifier.notifier_call = edp_notify_handler; register_reboot_notifier(&intel_dp->edp_notifier); + + /* + * Figure out the current pipe for the initial backlight setup. + * If the current pipe isn't valid, try the PPS pipe, and if that + * fails just assume pipe A. + */ + if (IS_CHERRYVIEW(dev)) + pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP); + else + pipe = PORT_TO_PIPE(intel_dp->DP); + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = intel_dp->pps_pipe; + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = PIPE_A; + + DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n", + pipe_name(pipe)); } intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_connector->panel.backlight_power = intel_edp_backlight_power; - intel_panel_setup_backlight(connector); + intel_panel_setup_backlight(connector, pipe); return true; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index bfe359506377..7f8c6a66680a 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -283,7 +283,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); + return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); } static int @@ -414,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_dp_add_properties(intel_dp, connector); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); + drm_mode_connector_set_path_property(connector, pathprop); drm_reinit_primary_mode_group(dev); mutex_lock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5c622ad2e9aa..25fdbb16d4e0 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -292,6 +292,9 @@ struct intel_crtc_config { * between pch encoders and cpu encoders. */ bool has_pch_encoder; + /* Are we sending infoframes on the attached port */ + bool has_infoframe; + /* CPU Transcoder for the pipe. Currently this can only differ from the * pipe on Haswell (where we have a special eDP transcoder). */ enum transcoder cpu_transcoder; @@ -340,7 +343,10 @@ struct intel_crtc_config { /* Selected dpll when shared or DPLL_ID_PRIVATE. */ enum intel_dpll_id shared_dpll; - /* PORT_CLK_SEL for DDI ports. */ + /* + * - PORT_CLK_SEL for DDI ports on HSW/BDW. + * - enum skl_dpll on SKL + */ uint32_t ddi_pll_sel; /* Actual register state of the dpll, for shared dpll cross-checking. 
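Because the VLV/CHV backlight registers are per pipe, eDP init has to pick a pipe before any modeset has stamped one into the connector; the fallback chain in intel_edp_init_connector() above is: the pipe currently driven by the port, then the PPS pipe, then assume pipe A. As a plain function (enum values local to the sketch):

#include <assert.h>

enum pipe { INVALID_PIPE = -1, PIPE_A, PIPE_B, PIPE_C };

/*
 * Pick a pipe for the initial backlight setup: trust the pipe the port
 * register reports, fall back to the PPS pipe, else assume pipe A.
 */
static enum pipe initial_backlight_pipe(enum pipe current_pipe,
                                        enum pipe pps_pipe)
{
        enum pipe pipe = current_pipe;

        if (pipe != PIPE_A && pipe != PIPE_B)
                pipe = pps_pipe;
        if (pipe != PIPE_A && pipe != PIPE_B)
                pipe = PIPE_A;
        return pipe;
}

int main(void)
{
        assert(initial_backlight_pipe(PIPE_B, INVALID_PIPE) == PIPE_B);
        assert(initial_backlight_pipe(INVALID_PIPE, PIPE_B) == PIPE_B);
        assert(initial_backlight_pipe(INVALID_PIPE, INVALID_PIPE) == PIPE_A);
        return 0;
}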
*/ @@ -552,6 +558,7 @@ struct intel_hdmi { void (*set_infoframes)(struct drm_encoder *encoder, bool enable, struct drm_display_mode *adjusted_mode); + bool (*infoframe_enabled)(struct drm_encoder *encoder); }; struct intel_dp_mst_encoder; @@ -784,8 +791,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen6_reset_rps_interrupts(struct drm_device *dev); +void gen6_enable_rps_interrupts(struct drm_device *dev); +void gen6_disable_rps_interrupts(struct drm_device *dev); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) @@ -950,7 +958,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, unsigned int tiling_mode, unsigned int bpp, unsigned int pitch); -void intel_display_handle_reset(struct drm_device *dev); +void intel_prepare_reset(struct drm_device *dev); +void intel_finish_reset(struct drm_device *dev); void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, @@ -992,21 +1001,16 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); -void intel_edp_psr_enable(struct intel_dp *intel_dp); -void intel_edp_psr_disable(struct intel_dp *intel_dp); void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); -void intel_edp_psr_invalidate(struct drm_device *dev, - unsigned frontbuffer_bits); -void intel_edp_psr_flush(struct drm_device *dev, - unsigned frontbuffer_bits); -void intel_edp_psr_init(struct drm_device *dev); - void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); void intel_dp_mst_suspend(struct drm_device *dev); void intel_dp_mst_resume(struct drm_device *dev); int intel_dp_max_link_bw(struct intel_dp *intel_dp); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); +uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); + /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); @@ -1096,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc, int fitting_mode); void intel_panel_set_backlight_acpi(struct intel_connector *connector, u32 level, u32 max); -int intel_panel_setup_backlight(struct drm_connector *connector); +int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); void intel_panel_enable_backlight(struct intel_connector *connector); void intel_panel_disable_backlight(struct intel_connector *connector); void intel_panel_destroy_backlight(struct drm_connector *connector); @@ -1106,6 +1110,19 @@ extern struct drm_display_mode *intel_find_panel_downclock( struct 
drm_device *dev, struct drm_display_mode *fixed_mode, struct drm_connector *connector); +void intel_backlight_register(struct drm_device *dev); +void intel_backlight_unregister(struct drm_device *dev); + + +/* intel_psr.c */ +bool intel_psr_is_enabled(struct drm_device *dev); +void intel_psr_enable(struct intel_dp *intel_dp); +void intel_psr_disable(struct intel_dp *intel_dp); +void intel_psr_invalidate(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_psr_flush(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_psr_init(struct drm_device *dev); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index f2183b554cbc..850cf7d6578c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, + struct drm_fb_offset *offsets, bool *enabled, int width, int height) { struct drm_device *dev = fb_helper->dev; @@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; + uint64_t conn_configured = 0, mask; + int pass = 0; save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL); @@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, return false; memcpy(save_enabled, enabled, dev->mode_config.num_connector); - + mask = (1 << fb_helper->connector_count) - 1; +retry: for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, fb_conn = fb_helper->connector_info[i]; connector = fb_conn->connector; + if (conn_configured & (1 << i)) + continue; + + if (pass == 0 && !connector->has_tile) + continue; + if (connector->status == connector_status_connected) num_connectors_detected++; if (!enabled[i]) { DRM_DEBUG_KMS("connector %s not enabled, skipping\n", connector->name); + conn_configured |= (1 << i); continue; } @@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); enabled[i] = false; + conn_configured |= (1 << i); continue; } @@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, /* try for preferred next */ if (!modes[i]) { - DRM_DEBUG_KMS("looking for preferred mode on connector %s\n", - connector->name); + DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n", + connector->name, connector->has_tile); modes[i] = drm_has_preferred_mode(fb_conn, width, height); } @@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? 
"i" :""); fallback = false; + conn_configured |= (1 << i); + } + + if ((conn_configured & mask) != mask) { + pass++; + goto retry; } /* diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 58cf2e6b78f4..79f6d72179c5 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -156,7 +156,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); - intel_edp_psr_invalidate(dev, obj->frontbuffer_bits); + intel_psr_invalidate(dev, obj->frontbuffer_bits); } /** @@ -182,7 +182,7 @@ void intel_frontbuffer_flush(struct drm_device *dev, intel_mark_fb_busy(dev, frontbuffer_bits, NULL); - intel_edp_psr_flush(dev, frontbuffer_bits); + intel_psr_flush(dev, frontbuffer_bits); /* * FIXME: Unconditional fbc flushing here is a rather gross hack and diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 29baa53aef90..3abc2000fce9 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, POSTING_READ(VIDEO_DIP_CTL); } +static bool g4x_infoframe_enabled(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + u32 val = I915_READ(VIDEO_DIP_CTL); + + if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK)) + return val & VIDEO_DIP_ENABLE; + + return false; +} + static void ibx_write_infoframe(struct drm_encoder *encoder, enum hdmi_infoframe_type type, const void *frame, ssize_t len) @@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } +static bool ibx_infoframe_enabled(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + return val & VIDEO_DIP_ENABLE; +} + static void cpt_write_infoframe(struct drm_encoder *encoder, enum hdmi_infoframe_type type, const void *frame, ssize_t len) @@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } +static bool cpt_infoframe_enabled(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + return val & VIDEO_DIP_ENABLE; +} + static void vlv_write_infoframe(struct drm_encoder *encoder, enum hdmi_infoframe_type type, const void *frame, ssize_t len) @@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } +static bool vlv_infoframe_enabled(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + return val & VIDEO_DIP_ENABLE; +} + static void hsw_write_infoframe(struct drm_encoder *encoder, enum hdmi_infoframe_type type, const void *frame, ssize_t len) @@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, 
POSTING_READ(ctl_reg); } +static bool hsw_infoframe_enabled(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); + u32 val = I915_READ(ctl_reg); + + return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW | + VIDEO_DIP_ENABLE_VS_HSW); +} + /* * The data we write to the DIP data buffer registers is 1 byte bigger than the * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting @@ -724,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; + if (intel_hdmi->infoframe_enabled(&encoder->base)) + pipe_config->has_infoframe = true; + if (tmp & SDVO_AUDIO_ENABLE) pipe_config->has_audio = true; @@ -925,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; + if (pipe_config->has_hdmi_sink) + pipe_config->has_infoframe = true; + if (intel_hdmi->color_range_auto) { /* See CEA-861-E - 5.1 Default Encoding Parameters */ if (pipe_config->has_hdmi_sink && @@ -1397,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder) static void chv_hdmi_pre_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; enum dpio_channel ch = vlv_dport_to_channel(dport); int pipe = intel_crtc->pipe; int data, i; @@ -1525,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); + intel_hdmi->set_infoframes(&encoder->base, + intel_crtc->config.has_hdmi_sink, + adjusted_mode); + intel_enable_hdmi(encoder); vlv_wait_port_ready(dev_priv, dport); @@ -1619,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, if (IS_VALLEYVIEW(dev)) { intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->set_infoframes = vlv_set_infoframes; + intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; } else if (IS_G4X(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; intel_hdmi->set_infoframes = g4x_set_infoframes; + intel_hdmi->infoframe_enabled = g4x_infoframe_enabled; } else if (HAS_DDI(dev)) { intel_hdmi->write_infoframe = hsw_write_infoframe; intel_hdmi->set_infoframes = hsw_set_infoframes; + intel_hdmi->infoframe_enabled = hsw_infoframe_enabled; } else if (HAS_PCH_IBX(dev)) { intel_hdmi->write_infoframe = ibx_write_infoframe; intel_hdmi->set_infoframes = ibx_set_infoframes; + intel_hdmi->infoframe_enabled = ibx_infoframe_enabled; } else { intel_hdmi->write_infoframe = cpt_write_infoframe; intel_hdmi->set_infoframes = cpt_set_infoframes; + intel_hdmi->infoframe_enabled = cpt_infoframe_enabled; } if (HAS_DDI(dev)) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6025ac754c37..e588376227ea 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -136,11 +136,10 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 
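infoframe_enabled joins write_infoframe/set_infoframes as a third per-platform hook, letting state readout ask the hardware whether any DIP is actually being transmitted and record the answer as pipe_config->has_infoframe instead of inferring it. A toy model of the hook-table shape, with an invented register bit standing in for VIDEO_DIP_ENABLE:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FAKE_DIP_ENABLE (1u << 31)      /* invented bit layout for the sketch */

static uint32_t fake_dip_ctl;           /* stands in for the DIP control register */

struct hdmi_ops {
        void (*set_infoframes)(bool enable);
        bool (*infoframe_enabled)(void);
};

static void mock_set_infoframes(bool enable)
{
        if (enable)
                fake_dip_ctl |= FAKE_DIP_ENABLE;
        else
                fake_dip_ctl &= ~FAKE_DIP_ENABLE;
}

/* readout hook: report what the hardware is actually doing */
static bool mock_infoframe_enabled(void)
{
        return fake_dip_ctl & FAKE_DIP_ENABLE;
}

static const struct hdmi_ops ops = {
        .set_infoframes = mock_set_infoframes,
        .infoframe_enabled = mock_infoframe_enabled,
};

int main(void)
{
        bool has_infoframe;

        ops.set_infoframes(true);
        has_infoframe = ops.infoframe_enabled();        /* get_config readout */
        assert(has_infoframe);
        return 0;
}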
PAGE_SIZE) #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) -#define GEN8_LR_CONTEXT_ALIGN 4096 - #define RING_EXECLIST_QFULL (1 << 0x2) #define RING_EXECLIST1_VALID (1 << 0x3) #define RING_EXECLIST0_VALID (1 << 0x4) @@ -204,6 +203,9 @@ enum { }; #define GEN8_CTX_ID_SHIFT 32 +static int intel_lr_context_pin(struct intel_engine_cs *ring, + struct intel_context *ctx); + /** * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists * @dev: DRM device. @@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists { WARN_ON(i915.enable_ppgtt == -1); + if (INTEL_INFO(dev)->gen >= 9) + return 1; + if (enable_execlists == 0) return 0; @@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring, struct drm_i915_gem_object *ctx_obj0, struct drm_i915_gem_object *ctx_obj1) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; uint64_t temp = 0; uint32_t desc[4]; unsigned long flags; @@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring, * Instead, we do the runtime_pm_get/put when creating/destroying requests. */ spin_lock_irqsave(&dev_priv->uncore.lock, flags); - if (IS_CHERRYVIEW(dev_priv->dev)) { + if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) { if (dev_priv->uncore.fw_rendercount++ == 0) dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_RENDER); if (dev_priv->uncore.fw_mediacount++ == 0) dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_MEDIA); + if (INTEL_INFO(dev)->gen >= 9) { + if (dev_priv->uncore.fw_blittercount++ == 0) + dev_priv->uncore.funcs.force_wake_get(dev_priv, + FORCEWAKE_BLITTER); + } } else { if (dev_priv->uncore.forcewake_count++ == 0) dev_priv->uncore.funcs.force_wake_get(dev_priv, @@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring, /* Release Force Wakeup (see the big comment above). 
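On CHV, and now on gen9+ with the extra blitter domain, forcewake is reference-counted per domain: only the first get and the last put actually touch the hardware. The counting discipline in isolation:

#include <assert.h>
#include <stdio.h>

struct fw_domain {
        const char *name;
        unsigned count;
};

/* first reference powers the domain up */
static void fw_get(struct fw_domain *d)
{
        if (d->count++ == 0)
                printf("force_wake_get(%s)\n", d->name);
}

/* last reference lets it sleep again */
static void fw_put(struct fw_domain *d)
{
        assert(d->count > 0);
        if (--d->count == 0)
                printf("force_wake_put(%s)\n", d->name);
}

int main(void)
{
        struct fw_domain render = { "render", 0 }, blitter = { "blitter", 0 };

        fw_get(&render);
        fw_get(&blitter);       /* the gen9-only extra domain in the patch */
        fw_get(&render);        /* nested get: no second hardware poke */
        fw_put(&render);
        fw_put(&render);
        fw_put(&blitter);
        return 0;
}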
*/ spin_lock_irqsave(&dev_priv->uncore.lock, flags); - if (IS_CHERRYVIEW(dev_priv->dev)) { + if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) { if (--dev_priv->uncore.fw_rendercount == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_RENDER); if (--dev_priv->uncore.fw_mediacount == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_MEDIA); + if (INTEL_INFO(dev)->gen >= 9) { + if (--dev_priv->uncore.fw_blittercount == 0) + dev_priv->uncore.funcs.force_wake_put(dev_priv, + FORCEWAKE_BLITTER); + } } else { if (--dev_priv->uncore.forcewake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, @@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring, spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); } -static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail) +static int execlists_update_context(struct drm_i915_gem_object *ctx_obj, + struct drm_i915_gem_object *ring_obj, + u32 tail) { struct page *page; uint32_t *reg_state; @@ -350,6 +368,7 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai reg_state = kmap_atomic(page); reg_state[CTX_RING_TAIL+1] = tail; + reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj); kunmap_atomic(reg_state); @@ -360,21 +379,25 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring, struct intel_context *to0, u32 tail0, struct intel_context *to1, u32 tail1) { - struct drm_i915_gem_object *ctx_obj0; + struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state; + struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf; struct drm_i915_gem_object *ctx_obj1 = NULL; + struct intel_ringbuffer *ringbuf1 = NULL; - ctx_obj0 = to0->engine[ring->id].state; BUG_ON(!ctx_obj0); WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0)); + WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj)); - execlists_ctx_write_tail(ctx_obj0, tail0); + execlists_update_context(ctx_obj0, ringbuf0->obj, tail0); if (to1) { + ringbuf1 = to1->engine[ring->id].ringbuf; ctx_obj1 = to1->engine[ring->id].state; BUG_ON(!ctx_obj1); WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1)); + WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj)); - execlists_ctx_write_tail(ctx_obj1, tail1); + execlists_update_context(ctx_obj1, ringbuf1->obj, tail1); } execlists_elsp_write(ring, ctx_obj0, ctx_obj1); @@ -384,7 +407,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) { struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL; struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL; - struct drm_i915_private *dev_priv = ring->dev->dev_private; assert_spin_locked(&ring->execlist_lock); @@ -401,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; list_del(&req0->execlist_link); - queue_work(dev_priv->wq, &req0->work); + list_add_tail(&req0->execlist_link, + &ring->execlist_retired_req_list); req0 = cursor; } else { req1 = cursor; @@ -423,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) static bool execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_ctx_submit_request *head_req; assert_spin_locked(&ring->execlist_lock); @@ -441,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, if (--head_req->elsp_submitted <= 0) { list_del(&head_req->execlist_link); - 
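execlists_update_context() generalises the old tail write: since ring buffers are now pinned only while their context is in flight, the GGTT start address is unknown at context creation and must be (re)written into the context image at submission time, alongside RING_TAIL. The context image stores register/value pairs, hence the +1 slots. A sketch with invented offsets:

#include <assert.h>
#include <stdint.h>

/* invented slot offsets into the saved register state page */
#define CTX_RING_TAIL           6
#define CTX_RING_BUFFER_START   8

/*
 * Slot N holds the register offset and slot N+1 its value, which is
 * why the driver writes reg_state[CTX_RING_TAIL+1] and friends.
 */
static void update_context(uint32_t *reg_state, uint32_t ring_ggtt_start,
                           uint32_t tail)
{
        reg_state[CTX_RING_TAIL + 1] = tail;
        reg_state[CTX_RING_BUFFER_START + 1] = ring_ggtt_start;
}

int main(void)
{
        uint32_t reg_state[16] = { 0 };

        update_context(reg_state, 0x100000, 0x40);
        assert(reg_state[CTX_RING_TAIL + 1] == 0x40);
        assert(reg_state[CTX_RING_BUFFER_START + 1] == 0x100000);
        return 0;
}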
queue_work(dev_priv->wq, &head_req->work); + list_add_tail(&head_req->execlist_link, + &ring->execlist_retired_req_list); return true; } } @@ -510,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring) ((u32)ring->next_context_status_buffer & 0x07) << 8); } -static void execlists_free_request_task(struct work_struct *work) -{ - struct intel_ctx_submit_request *req = - container_of(work, struct intel_ctx_submit_request, work); - struct drm_device *dev = req->ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - intel_runtime_pm_put(dev_priv); - - mutex_lock(&dev->struct_mutex); - i915_gem_context_unreference(req->ctx); - mutex_unlock(&dev->struct_mutex); - - kfree(req); -} - static int execlists_context_queue(struct intel_engine_cs *ring, struct intel_context *to, u32 tail) @@ -540,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring, return -ENOMEM; req->ctx = to; i915_gem_context_reference(req->ctx); + + if (to != ring->default_context) + intel_lr_context_pin(ring, to); + req->ring = ring; req->tail = tail; - INIT_WORK(&req->work, execlists_free_request_task); intel_runtime_pm_get(dev_priv); @@ -561,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring, if (to == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, - "More than 2 already-submitted reqs queued\n"); + "More than 2 already-submitted reqs queued\n"); list_del(&tail_req->execlist_link); - queue_work(dev_priv->wq, &tail_req->work); + list_add_tail(&tail_req->execlist_link, + &ring->execlist_retired_req_list); } } @@ -731,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, return 0; } +void intel_execlists_retire_requests(struct intel_engine_cs *ring) +{ + struct intel_ctx_submit_request *req, *tmp; + struct drm_i915_private *dev_priv = ring->dev->dev_private; + unsigned long flags; + struct list_head retired_list; + + WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + if (list_empty(&ring->execlist_retired_req_list)) + return; + + INIT_LIST_HEAD(&retired_list); + spin_lock_irqsave(&ring->execlist_lock, flags); + list_replace_init(&ring->execlist_retired_req_list, &retired_list); + spin_unlock_irqrestore(&ring->execlist_lock, flags); + + list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { + struct intel_context *ctx = req->ctx; + struct drm_i915_gem_object *ctx_obj = + ctx->engine[ring->id].state; + + if (ctx_obj && (ctx != ring->default_context)) + intel_lr_context_unpin(ring, ctx); + intel_runtime_pm_put(dev_priv); + i915_gem_context_unreference(req->ctx); + list_del(&req->execlist_link); + kfree(req); + } +} + void intel_logical_ring_stop(struct intel_engine_cs *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -791,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf) execlists_context_queue(ring, ctx, ringbuf->tail); } +static int intel_lr_context_pin(struct intel_engine_cs *ring, + struct intel_context *ctx) +{ + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + int ret = 0; + + WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + if (ctx->engine[ring->id].unpin_count++ == 0) { + ret = i915_gem_obj_ggtt_pin(ctx_obj, + GEN8_LR_CONTEXT_ALIGN, 0); + if (ret) + goto reset_unpin_count; + + ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); + if (ret) + goto unpin_ctx_obj; + } + + return ret; + +unpin_ctx_obj: + 
i915_gem_object_ggtt_unpin(ctx_obj); +reset_unpin_count: + ctx->engine[ring->id].unpin_count = 0; + + return ret; +} + +void intel_lr_context_unpin(struct intel_engine_cs *ring, + struct intel_context *ctx) +{ + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + + if (ctx_obj) { + WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + if (--ctx->engine[ring->id].unpin_count == 0) { + intel_unpin_ringbuffer_obj(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); + } + } +} + static int logical_ring_alloc_seqno(struct intel_engine_cs *ring, struct intel_context *ctx) { + int ret; + if (ring->outstanding_lazy_seqno) return 0; @@ -804,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring, if (request == NULL) return -ENOMEM; + if (ctx != ring->default_context) { + ret = intel_lr_context_pin(ring, ctx); + if (ret) { + kfree(request); + return ret; + } + } + /* Hold a reference to the context this request belongs to * (we will need it when the time comes to emit/retire the * request). @@ -989,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords) return 0; } +static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, + struct intel_context *ctx) +{ + int ret, i; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_workarounds *w = &dev_priv->workarounds; + + if (WARN_ON(w->count == 0)) + return 0; + + ring->gpu_caches_dirty = true; + ret = logical_ring_flush_all_caches(ringbuf); + if (ret) + return ret; + + ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2); + if (ret) + return ret; + + intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); + for (i = 0; i < w->count; i++) { + intel_logical_ring_emit(ringbuf, w->reg[i].addr); + intel_logical_ring_emit(ringbuf, w->reg[i].value); + } + intel_logical_ring_emit(ringbuf, MI_NOOP); + + intel_logical_ring_advance(ringbuf); + + ring->gpu_caches_dirty = true; + ret = logical_ring_flush_all_caches(ringbuf); + if (ret) + return ret; + + return 0; +} + static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; @@ -1032,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - return ret; + return init_workarounds_ring(ring); } static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, @@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin init_waitqueue_head(&ring->irq_queue); INIT_LIST_HEAD(&ring->execlist_queue); + INIT_LIST_HEAD(&ring->execlist_retired_req_list); spin_lock_init(&ring->execlist_lock); ring->next_context_status_buffer = 0; @@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev) ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; ring->init = gen8_init_render_ring; + ring->init_context = intel_logical_ring_workarounds_emit; ring->cleanup = intel_fini_pipe_control; ring->get_seqno = gen8_get_seqno; ring->set_seqno = gen8_set_seqno; @@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *ring_obj = ringbuf->obj; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct page 
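intel_lr_context_pin()/unpin() add lazy pinning with a per-engine unpin_count: the context object and ring buffer are GGTT-pinned on the first reference, released on the last, and a failed ring pin unwinds the context pin. The counting rules by themselves:

#include <assert.h>
#include <stdbool.h>

struct lr_context {
        int unpin_count;
        bool ctx_pinned;
        bool ring_pinned;
};

/* pin on first use; fail_ring simulates intel_pin_and_map failing */
static int context_pin(struct lr_context *ctx, bool fail_ring)
{
        if (ctx->unpin_count++ == 0) {
                ctx->ctx_pinned = true;
                if (fail_ring) {
                        ctx->ctx_pinned = false;        /* unpin_ctx_obj: */
                        ctx->unpin_count = 0;           /* reset_unpin_count: */
                        return -1;
                }
                ctx->ring_pinned = true;
        }
        return 0;
}

/* unpin on last use */
static void context_unpin(struct lr_context *ctx)
{
        assert(ctx->unpin_count > 0);
        if (--ctx->unpin_count == 0) {
                ctx->ring_pinned = false;
                ctx->ctx_pinned = false;
        }
}

int main(void)
{
        struct lr_context ctx = { 0 };

        assert(context_pin(&ctx, false) == 0);
        assert(context_pin(&ctx, false) == 0);  /* nested: no re-pin */
        context_unpin(&ctx);
        assert(ctx.ctx_pinned);                 /* still one user left */
        context_unpin(&ctx);
        assert(!ctx.ctx_pinned && !ctx.ring_pinned);
        return 0;
}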
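intel_logical_ring_workarounds_emit() replays the saved workaround registers through the ring itself: one MI_LOAD_REGISTER_IMM header covering w->count registers, then count address/value pairs, then an MI_NOOP, which is exactly why it reserves w->count * 2 + 2 dwords. Building that layout into a plain dword array (opcode encodings invented for the sketch):

#include <assert.h>
#include <stdint.h>

#define MI_NOOP                 0x00000000u     /* invented encodings */
#define MI_LOAD_REGISTER_IMM(n) (0x22000000u | (n))

struct wa_reg { uint32_t addr, value; };

/* emit header + addr/value pairs + NOOP; returns dwords written */
static int emit_workarounds(uint32_t *cs, const struct wa_reg *w, int count)
{
        int n = 0;

        cs[n++] = MI_LOAD_REGISTER_IMM(count);
        for (int i = 0; i < count; i++) {
                cs[n++] = w[i].addr;
                cs[n++] = w[i].value;
        }
        cs[n++] = MI_NOOP;
        return n;
}

int main(void)
{
        const struct wa_reg w[] = { { 0x7004, 0x1 }, { 0x7008, 0x2 } };
        uint32_t cs[2 * 2 + 2];

        /* matches the ring_begin(w->count * 2 + 2) reservation above */
        assert(emit_workarounds(cs, w, 2) == 2 * 2 + 2);
        return 0;
}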
*page; uint32_t *reg_state; @@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); reg_state[CTX_RING_TAIL+1] = 0; reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); - reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj); + /* Ring buffer start address is not known until the buffer is pinned. + * It is written to the context image in execlists_update_context() + */ reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); reg_state[CTX_RING_BUFFER_CONTROL+1] = ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; @@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx) for (i = 0; i < I915_NUM_RINGS; i++) { struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; if (ctx_obj) { + struct intel_ringbuffer *ringbuf = + ctx->engine[i].ringbuf; + struct intel_engine_cs *ring = ringbuf->ring; + + if (ctx == ring->default_context) { + intel_unpin_ringbuffer_obj(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); + } intel_destroy_ringbuffer_obj(ringbuf); kfree(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); drm_gem_object_unreference(&ctx_obj->base); } } @@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring) { int ret = 0; - WARN_ON(INTEL_INFO(ring->dev)->gen != 8); + WARN_ON(INTEL_INFO(ring->dev)->gen < 8); switch (ring->id) { case RCS: - ret = GEN8_LR_CONTEXT_RENDER_SIZE; + if (INTEL_INFO(ring->dev)->gen >= 9) + ret = GEN9_LR_CONTEXT_RENDER_SIZE; + else + ret = GEN8_LR_CONTEXT_RENDER_SIZE; break; case VCS: case BCS: @@ -1649,7 +1794,7 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring) return ret; } -static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring, +static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, struct drm_i915_gem_object *default_ctx_obj) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1659,15 +1804,11 @@ static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring, ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj); ring->status_page.page_addr = kmap(sg_page(default_ctx_obj->pages->sgl)); - if (ring->status_page.page_addr == NULL) - return -ENOMEM; ring->status_page.obj = default_ctx_obj; I915_WRITE(RING_HWS_PGA(ring->mmio_base), (u32)ring->status_page.gfx_addr); POSTING_READ(RING_HWS_PGA(ring->mmio_base)); - - return 0; } /** @@ -1686,6 +1827,7 @@ static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring, int intel_lr_context_deferred_create(struct intel_context *ctx, struct intel_engine_cs *ring) { + const bool is_global_default_ctx = (ctx == ring->default_context); struct drm_device *dev = ring->dev; struct drm_i915_gem_object *ctx_obj; uint32_t context_size; @@ -1705,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, return ret; } - ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); - if (ret) { - DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret); - drm_gem_object_unreference(&ctx_obj->base); - return ret; + if (is_global_default_ctx) { + ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); + if (ret) { + DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", + ret); + drm_gem_object_unreference(&ctx_obj->base); + return ret; + } } ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); if (!ringbuf) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer 
%s\n", ring->name); - i915_gem_object_ggtt_unpin(ctx_obj); - drm_gem_object_unreference(&ctx_obj->base); ret = -ENOMEM; - return ret; + goto error_unpin_ctx; } ringbuf->ring = ring; @@ -1732,43 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, ringbuf->space = ringbuf->size; ringbuf->last_retired_head = -1; - /* TODO: For now we put this in the mappable region so that we can reuse - * the existing ringbuffer code which ioremaps it. When we start - * creating many contexts, this will no longer work and we must switch - * to a kmapish interface. - */ - ret = intel_alloc_ringbuffer_obj(dev, ringbuf); - if (ret) { - DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n", + if (ringbuf->obj == NULL) { + ret = intel_alloc_ringbuffer_obj(dev, ringbuf); + if (ret) { + DRM_DEBUG_DRIVER( + "Failed to allocate ringbuffer obj %s: %d\n", ring->name, ret); - goto error; + goto error_free_rbuf; + } + + if (is_global_default_ctx) { + ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + if (ret) { + DRM_ERROR( + "Failed to pin and map ringbuffer %s: %d\n", + ring->name, ret); + goto error_destroy_rbuf; + } + } + } ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); if (ret) { DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); - intel_destroy_ringbuffer_obj(ringbuf); goto error; } ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].state = ctx_obj; - if (ctx == ring->default_context) { - ret = lrc_setup_hardware_status_page(ring, ctx_obj); - if (ret) { - DRM_ERROR("Failed to setup hardware status page\n"); - goto error; - } - } + if (ctx == ring->default_context) + lrc_setup_hardware_status_page(ring, ctx_obj); if (ring->id == RCS && !ctx->rcs_initialized) { + if (ring->init_context) { + ret = ring->init_context(ring, ctx); + if (ret) + DRM_ERROR("ring init context: %d\n", ret); + } + ret = intel_lr_context_render_state_init(ring, ctx); if (ret) { DRM_ERROR("Init render state failed: %d\n", ret); ctx->engine[ring->id].ringbuf = NULL; ctx->engine[ring->id].state = NULL; - intel_destroy_ringbuffer_obj(ringbuf); goto error; } ctx->rcs_initialized = true; @@ -1777,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, return 0; error: + if (is_global_default_ctx) + intel_unpin_ringbuffer_obj(ringbuf); +error_destroy_rbuf: + intel_destroy_ringbuffer_obj(ringbuf); +error_free_rbuf: kfree(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); +error_unpin_ctx: + if (is_global_default_ctx) + i915_gem_object_ggtt_unpin(ctx_obj); drm_gem_object_unreference(&ctx_obj->base); return ret; } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 33c3b4bf28c5..14b216b9be7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -24,6 +24,8 @@ #ifndef _INTEL_LRC_H_ #define _INTEL_LRC_H_ +#define GEN8_LR_CONTEXT_ALIGN 4096 + /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) #define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) @@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring, void intel_lr_context_free(struct intel_context *ctx); int intel_lr_context_deferred_create(struct intel_context *ctx, struct intel_engine_cs *ring); +void intel_lr_context_unpin(struct intel_engine_cs *ring, + struct intel_context *ctx); /* Execlists */ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); @@ -104,11 +108,11 @@ struct intel_ctx_submit_request { u32 tail; struct list_head execlist_link; - struct work_struct 
work; int elsp_submitted; }; void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring); +void intel_execlists_retire_requests(struct intel_engine_cs *ring); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2b50c98dd6b0..14654d628ca4 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev) int pipe; u8 pin; + /* + * Unlock registers and just leave them unlocked. Do this before + * checking quirk lists to avoid bogus WARNINGs. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); + } if (!intel_lvds_supported(dev)) return; @@ -1097,17 +1108,6 @@ out: lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK; - /* - * Unlock registers and just - * leave them unlocked - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_CONTROL, - I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); - } else { - I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); - } lvds_connector->lid_notifier.notifier_call = intel_lid_notify; if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { DRM_DEBUG_KMS("lid notifier registration failed\n"); @@ -1116,7 +1116,7 @@ out: drm_connector_register(connector); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); - intel_panel_setup_backlight(connector); + intel_panel_setup_backlight(connector, INVALID_PIPE); return; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e18b3f49074c..4d63839bd9b4 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return 0; + return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; } @@ -536,12 +539,15 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 val; + struct intel_panel *panel = &connector->panel; + u32 val = 0; mutex_lock(&dev_priv->backlight_lock); - val = dev_priv->display.get_backlight(connector); - val = intel_panel_compute_brightness(connector, val); + if (panel->backlight.enabled) { + val = dev_priv->display.get_backlight(connector); + val = intel_panel_compute_brightness(connector, val); + } mutex_unlock(&dev_priv->backlight_lock); @@ -602,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level) enum pipe pipe = intel_get_pipe_from_connector(connector); u32 tmp; + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return; + tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level); } @@ -625,10 +634,9 @@ static void intel_panel_set_backlight(struct intel_connector *connector, struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; - enum pipe pipe = intel_get_pipe_from_connector(connector); u32 hw_level; - if (!panel->backlight.present || pipe == INVALID_PIPE) + if (!panel->backlight.present) return; mutex_lock(&dev_priv->backlight_lock); @@ -656,6 +664,12 @@ void 
intel_panel_set_backlight_acpi(struct intel_connector *connector, enum pipe pipe = intel_get_pipe_from_connector(connector); u32 hw_level; + /* + * INVALID_PIPE may occur during driver init because + * connection_mutex isn't held across the entire backlight + * setup + modeset readout, and the BIOS can issue the + * requests at any time. + */ if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -717,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector) enum pipe pipe = intel_get_pipe_from_connector(connector); u32 tmp; + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return; + intel_panel_actually_set_backlight(connector, 0); tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe)); @@ -728,9 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; - enum pipe pipe = intel_get_pipe_from_connector(connector); - if (!panel->backlight.present || pipe == INVALID_PIPE) + if (!panel->backlight.present) return; /* @@ -906,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector) enum pipe pipe = intel_get_pipe_from_connector(connector); u32 ctl, ctl2; + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return; + ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { DRM_DEBUG_KMS("backlight already enabled\n"); @@ -934,7 +953,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); - if (!panel->backlight.present || pipe == INVALID_PIPE) + if (!panel->backlight.present) return; DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); @@ -1026,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector) if (WARN_ON(panel->backlight.device)) return -ENODEV; + if (!panel->backlight.present) + return 0; + WARN_ON(panel->backlight.max == 0); memset(&props, 0, sizeof(props)); @@ -1061,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector) panel->backlight.device = NULL; return -ENODEV; } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + connector->base.name); + return 0; } @@ -1094,15 +1120,28 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; + int min; WARN_ON(panel->backlight.max == 0); + /* + * XXX: If the vbt value is 255, it makes min equal to max, which leads + * to problems. There are such machines out there. Either our + * interpretation is wrong or the vbt has bogus data. Or both. Safeguard + * against this by letting the minimum be at most (arbitrarily chosen) + * 25% of the max. 
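
For illustration, the clamp-and-scale arithmetic described in this comment can be modelled in plain user-space C. Here scale() is a hypothetical stand-in for the driver's linear-remap helper and the PWM period is made up; only the 64/255 cap mirrors the patch.

#include <stdio.h>

/* Hypothetical stand-in for the driver's linear remap helper:
 * maps v from [smin..smax] onto [dmin..dmax]. */
static unsigned int scale(unsigned int v, unsigned int smin, unsigned int smax,
			  unsigned int dmin, unsigned int dmax)
{
	return dmin + (v - smin) * (dmax - dmin) / (smax - smin);
}

int main(void)
{
	unsigned int vbt_min = 255;	/* bogus VBT: min would equal max */
	unsigned int pwm_max = 7812;	/* example PWM period */

	/* cap the VBT coefficient at 64/255 (~25%), as the patch does */
	unsigned int min = vbt_min > 64 ? 64 : vbt_min;

	printf("min duty cycle: %u/%u\n",
	       scale(min, 0, 255, 0, pwm_max), pwm_max);
	return 0;
}
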
+ */ + min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); + if (min != dev_priv->vbt.backlight.min_brightness) { + DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n", + dev_priv->vbt.backlight.min_brightness, min); + } + /* vbt value is a coefficient in range [0..255] */ - return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, - 0, panel->backlight.max); + return scale(min, 0, 255, 0, panel->backlight.max); } -static int bdw_setup_backlight(struct intel_connector *connector) +static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1128,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector) return 0; } -static int pch_setup_backlight(struct intel_connector *connector) +static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1155,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector) return 0; } -static int i9xx_setup_backlight(struct intel_connector *connector) +static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1187,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector) return 0; } -static int i965_setup_backlight(struct intel_connector *connector) +static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1217,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector) return 0; } -static int vlv_setup_backlight(struct intel_connector *connector) +static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; - enum pipe pipe; + enum pipe p; u32 ctl, ctl2, val; - for_each_pipe(dev_priv, pipe) { - u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); + for_each_pipe(dev_priv, p) { + u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p)); /* Skip if the modulation freq is already set */ if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) continue; cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) | + I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) | cur_val); } - ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A)); + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return -ENODEV; + + ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; - ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A)); + ctl = I915_READ(VLV_BLC_PWM_CTL(pipe)); panel->backlight.max = ctl >> 16; if (!panel->backlight.max) return -ENODEV; panel->backlight.min = get_backlight_min_vbt(connector); - val = _vlv_get_backlight(dev, PIPE_A); + val = _vlv_get_backlight(dev, pipe); panel->backlight.level = intel_panel_compute_brightness(connector, val); panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && @@ -1256,7 +1298,7 @@ static int vlv_setup_backlight(struct intel_connector *connector) return 0; } -int intel_panel_setup_backlight(struct drm_connector *connector) +int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) { struct drm_device *dev 
= connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1275,7 +1317,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector) /* set level and max in panel struct */ mutex_lock(&dev_priv->backlight_lock); - ret = dev_priv->display.setup_backlight(intel_connector); + ret = dev_priv->display.setup_backlight(intel_connector, pipe); mutex_unlock(&dev_priv->backlight_lock); if (ret) { @@ -1284,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector) return ret; } - intel_backlight_device_register(intel_connector); - panel->backlight.present = true; - DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, " - "sysfs interface %sregistered\n", + DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n", + connector->name, panel->backlight.enabled ? "enabled" : "disabled", - panel->backlight.level, panel->backlight.max, - panel->backlight.device ? "" : "not "); + panel->backlight.level, panel->backlight.max); return 0; } @@ -1303,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector) struct intel_panel *panel = &intel_connector->panel; panel->backlight.present = false; - intel_backlight_device_unregister(intel_connector); } /* Set up chip specific backlight functions */ @@ -1366,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel) drm_mode_destroy(intel_connector->base.dev, panel->downclock_mode); } + +void intel_backlight_register(struct drm_device *dev) +{ + struct intel_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + intel_backlight_device_register(connector); +} + +void intel_backlight_unregister(struct drm_device *dev) +{ + struct intel_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) + intel_backlight_device_unregister(connector); +} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 300d7e503f96..964b28e3c630 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4451,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) dev_priv->rps.min_freq_softlimit); if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) - & GENFREQSTATUS) == 0, 5)) + & GENFREQSTATUS) == 0, 100)) DRM_ERROR("timed out waiting for Punit\n"); vlv_force_gfx_clock(dev_priv, false); @@ -4504,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) "Odd GPU freq value\n")) val &= ~1; - if (val != dev_priv->rps.cur_freq) { - DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), - dev_priv->rps.cur_freq, - vlv_gpu_freq(dev_priv, val), val); - + if (val != dev_priv->rps.cur_freq) vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); - } I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); @@ -4519,26 +4513,6 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); } -static void gen8_disable_rps_interrupts(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP); - I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) & - ~dev_priv->pm_rps_events); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (GEN8_GT_IMR(2)) to mask PM interrupts. 
The only risk is in - * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which - * gen8_enable_rps will clean up. */ - - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->irq_lock); - - I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); -} - static void gen9_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4546,36 +4520,12 @@ static void gen9_disable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); } -static void gen6_disable_rps_interrupts(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & - ~dev_priv->pm_rps_events); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->irq_lock); - - I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); -} - static void gen6_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - - if (IS_BROADWELL(dev)) - gen8_disable_rps_interrupts(dev); - else - gen6_disable_rps_interrupts(dev); } static void cherryview_disable_rps(struct drm_device *dev) @@ -4583,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_RC_CONTROL, 0); - - gen8_disable_rps_interrupts(dev); } static void valleyview_disable_rps(struct drm_device *dev) @@ -4598,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); - - gen6_disable_rps_interrupts(dev); } static void intel_print_rc6_info(struct drm_device *dev, u32 mode) @@ -4663,47 +4609,46 @@ int intel_enable_rc6(const struct drm_device *dev) return i915.enable_rc6; } -static void gen8_enable_rps_interrupts(struct drm_device *dev) +static void gen6_init_rps_frequencies(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t rp_state_cap; + u32 ddcc_status = 0; + int ret; - spin_lock_irq(&dev_priv->irq_lock); - WARN_ON(dev_priv->rps.pm_iir); - gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void gen6_enable_rps_interrupts(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - spin_lock_irq(&dev_priv->irq_lock); - WARN_ON(dev_priv->rps.pm_iir); - gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap) -{ + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); /* All of these values are in units of 50MHz */ dev_priv->rps.cur_freq = 0; - /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */ - dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; + /* static values from HW: RP0 > RP1 > RPn (min_freq) */ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; + dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; dev_priv->rps.min_freq = (rp_state_cap >> 
16) & 0xff; - /* XXX: only BYT has a special efficient freq */ - dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; /* hw_max = RP0 until we check for overclocking */ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; + dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + ret = sandybridge_pcode_read(dev_priv, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status); + if (0 == ret) + dev_priv->rps.efficient_freq = + (ddcc_status >> 8) & 0xff; + } + /* Preserve min/max settings in case of re-init */ if (dev_priv->rps.max_freq_softlimit == 0) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; - if (dev_priv->rps.min_freq_softlimit == 0) - dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; + if (dev_priv->rps.min_freq_softlimit == 0) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + dev_priv->rps.min_freq_softlimit = + /* max(RPe, 450 MHz) */ + max(dev_priv->rps.efficient_freq, (u8) 9); + else + dev_priv->rps.min_freq_softlimit = + dev_priv->rps.min_freq; + } } static void gen9_enable_rps(struct drm_device *dev) @@ -4749,7 +4694,7 @@ static void gen8_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; - uint32_t rc6_mask = 0, rp_state_cap; + uint32_t rc6_mask = 0; int unused; /* 1a: Software RC state - RC0 */ @@ -4762,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev) /* 2a: Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - parse_rp_state_cap(dev_priv, rp_state_cap); + /* Initialize rps frequencies */ + gen6_init_rps_frequencies(dev); /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); @@ -4821,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later */ - gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8); - - gen8_enable_rps_interrupts(dev); + dev_priv->rps.power = HIGH_POWER; /* force a reset */ + gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); } @@ -4832,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; - u32 rp_state_cap; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; @@ -4856,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - - parse_rp_state_cap(dev_priv, rp_state_cap); + /* Initialize rps frequencies */ + gen6_init_rps_frequencies(dev); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -4921,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev) dev_priv->rps.power = HIGH_POWER; /* force a reset */ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); - gen6_enable_rps_interrupts(dev); - rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); if (IS_GEN6(dev) && ret) { @@ -4975,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev) * to use for memory access. We do this by specifying the IA frequency * the PCU should use as a reference to determine the ring frequency. 
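
Stepping back to the RP_STATE_CAP decoding in gen6_init_rps_frequencies() above: RP0, RP1 and RPn occupy successive bytes, every value is in 50 MHz units, and the 450 MHz floor is therefore the constant 9. A rough self-contained sketch with a fabricated register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rp_state_cap = 0x0a0e12;	/* fabricated example value */

	uint8_t rp0 = (rp_state_cap >> 0) & 0xff;	/* 18 -> 900 MHz */
	uint8_t rp1 = (rp_state_cap >> 8) & 0xff;	/* 14 -> 700 MHz */
	uint8_t rpn = (rp_state_cap >> 16) & 0xff;	/* 10 -> 500 MHz */

	/* efficient frequency defaults to RP1; the min softlimit then
	 * applies the max(RPe, 450 MHz) floor from the patch */
	uint8_t rpe = rp1;
	uint8_t min_soft = rpe > 9 ? rpe : 9;	/* 9 * 50 MHz = 450 MHz */

	printf("RP0 %u MHz, RP1 %u MHz, RPn %u MHz, min softlimit %u MHz\n",
	       rp0 * 50, rp1 * 50, rpn * 50, min_soft * 50);
	return 0;
}
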
*/ - for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; + for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq; gpu_freq--) { - int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; + int diff = dev_priv->rps.max_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; if (INTEL_INFO(dev)->gen >= 8) { @@ -5132,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev) pcbr = I915_READ(VLV_PCBR); if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); paddr = (dev_priv->mm.stolen_base + (gtt->stolen_size - pctx_size)); pctx_paddr = (paddr & (~4095)); I915_WRITE(VLV_PCBR, pctx_paddr); } + + DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); } static void valleyview_setup_pctx(struct drm_device *dev) @@ -5163,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev) goto out; } + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + /* * From the Gunit register HAS: * The Gfx driver is expected to program this register and ensure @@ -5181,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) I915_WRITE(VLV_PCBR, pctx_paddr); out: + DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; } @@ -5217,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) dev_priv->mem_freq = 1333; break; } - DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; @@ -5259,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) mutex_lock(&dev_priv->rps.hw_lock); - val = vlv_punit_read(dev_priv, CCK_FUSE_REG); + mutex_lock(&dev_priv->dpio_lock); + val = vlv_cck_read(dev_priv, CCK_FUSE_REG); + mutex_unlock(&dev_priv->dpio_lock); + switch ((val >> 2) & 0x7) { case 0: case 1: @@ -5283,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) dev_priv->mem_freq = 1600; break; } - DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; @@ -5369,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev) /* For now we assume BIOS is allocating and populating the PCBR */ pcbr = I915_READ(VLV_PCBR); - DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr); - /* 3: Enable RC6 */ if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && (pcbr >> VLV_PCBR_ADDR_SHIFT)) @@ -5400,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev) val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? 
"yes" : "no"); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); dev_priv->rps.cur_freq = (val >> 8) & 0xff; @@ -5414,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); - gen8_enable_rps_interrupts(dev); - gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); } @@ -5480,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev) val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); dev_priv->rps.cur_freq = (val >> 8) & 0xff; @@ -5494,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); - gen6_enable_rps_interrupts(dev); - gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); } @@ -6242,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev) valleyview_cleanup_gt_powersave(dev); } +static void gen6_suspend_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + /* + * TODO: disable RPS interrupts on GEN9+ too once RPS support + * is added for it. + */ + if (INTEL_INFO(dev)->gen < 9) + gen6_disable_rps_interrupts(dev); +} + /** * intel_suspend_gt_powersave - suspend PM work and helper threads * @dev: drm device @@ -6254,12 +6217,10 @@ void intel_suspend_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* Interrupts should be disabled already to avoid re-arming. */ - WARN_ON(intel_irqs_enabled(dev_priv)); - - flush_delayed_work(&dev_priv->rps.delayed_resume_work); + if (INTEL_INFO(dev)->gen < 6) + return; - cancel_work_sync(&dev_priv->rps.work); + gen6_suspend_rps(dev); /* Force GPU to min freq during suspend */ gen6_rps_idle(dev_priv); @@ -6269,9 +6230,6 @@ void intel_disable_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* Interrupts should be disabled already to avoid re-arming. */ - WARN_ON(intel_irqs_enabled(dev_priv)); - if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); ironlake_disable_rc6(dev); @@ -6287,6 +6245,7 @@ void intel_disable_gt_powersave(struct drm_device *dev) valleyview_disable_rps(dev); else gen6_disable_rps(dev); + dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); } @@ -6301,6 +6260,13 @@ static void intel_gen6_powersave_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); + /* + * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is + * added for it. 
+ */ + if (INTEL_INFO(dev)->gen < 9) + gen6_reset_rps_interrupts(dev); + if (IS_CHERRYVIEW(dev)) { cherryview_enable_rps(dev); } else if (IS_VALLEYVIEW(dev)) { @@ -6315,6 +6281,10 @@ static void intel_gen6_powersave_work(struct work_struct *work) __gen6_update_ring_freq(dev); } dev_priv->rps.enabled = true; + + if (INTEL_INFO(dev)->gen < 9) + gen6_enable_rps_interrupts(dev); + mutex_unlock(&dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); @@ -6353,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + if (INTEL_INFO(dev)->gen < 6) + return; + + gen6_suspend_rps(dev); dev_priv->rps.enabled = false; - intel_enable_gt_powersave(dev); } static void ibx_init_clock_gating(struct drm_device *dev) @@ -6533,11 +6506,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(_3D_CHICKEN, _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - /* WaSetupGtModeTdRowDispatch:snb */ - if (IS_SNB_GT1(dev)) - I915_WRITE(GEN6_GT_MODE, - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); - /* WaDisable_RenderCache_OperationalFlush:snb */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); @@ -6550,7 +6518,7 @@ static void gen6_init_clock_gating(struct drm_device *dev) * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). */ I915_WRITE(GEN6_GT_MODE, - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); ilk_init_lp_watermarks(dev); @@ -6748,7 +6716,7 @@ static void haswell_init_clock_gating(struct drm_device *dev) * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). */ I915_WRITE(GEN7_GT_MODE, - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -6845,7 +6813,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
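
The _MASKED_FIELD() conversions in these hunks matter because GT_MODE-class registers are masked-write: the high 16 bits of the written value select which of the low 16 bits may change, so plainly ORing mask and value never arms the write correctly. A self-contained model (the field values are illustrative; the macro semantics mirror i915's):

#include <stdint.h>
#include <stdio.h>

#define MASKED_FIELD(mask, value)	(((uint32_t)(mask) << 16) | (value))

/* model of how the hardware applies a masked write */
static uint16_t masked_write(uint16_t old, uint32_t wr)
{
	uint16_t mask = wr >> 16, val = wr & 0xffff;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t wiz_mask = 0x3 << 7;	/* illustrative 2-bit field */
	uint16_t wiz_16x4 = 0x1 << 7;
	uint16_t gt_mode  = 0x2 << 7;	/* field currently holds 2 */

	gt_mode = masked_write(gt_mode, MASKED_FIELD(wiz_mask, wiz_16x4));
	printf("GT_MODE field now 0x%04x\n", gt_mode);	/* -> 0x0080 */
	return 0;
}
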
*/ I915_WRITE(GEN7_GT_MODE, - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); snpcr &= ~GEN6_MBC_SNPCR_MASK; @@ -6958,18 +6926,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev) /* WaDisableSDEUnitClockGating:chv */ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); - - /* WaDisableGunitClockGating:chv (pre-production hw) */ - I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) | - GINT_DIS); - - /* WaDisableFfDopClockGating:chv (pre-production hw) */ - I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE)); - - /* WaDisableDopClockGating:chv (pre-production hw) */ - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | - GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); } static void g4x_init_clock_gating(struct drm_device *dev) @@ -7140,7 +7096,7 @@ void intel_init_pm(struct drm_device *dev) i915_ironlake_get_mem_freq(dev); /* For FIFO watermark updates */ - if (IS_GEN9(dev)) { + if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); dev_priv->display.init_clock_gating = gen9_init_clock_gating; @@ -7227,7 +7183,7 @@ void intel_init_pm(struct drm_device *dev) } } -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -7237,8 +7193,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) } I915_WRITE(GEN6_PCODE_DATA, *val); - if (INTEL_INFO(dev_priv)->gen >= 9) - I915_WRITE(GEN9_PCODE_DATA1, 0); + I915_WRITE(GEN6_PCODE_DATA1, 0); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, @@ -7253,7 +7208,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -7276,99 +7231,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) return 0; } -static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) +static int vlv_gpu_freq_div(unsigned int czclk_freq) { - int div; - - /* 4 x czclk */ - switch (dev_priv->mem_freq) { - case 800: - div = 10; - break; - case 1066: - div = 12; - break; - case 1333: - div = 16; - break; + switch (czclk_freq) { + case 200: + return 10; + case 267: + return 12; + case 320: + case 333: + return 16; + case 400: + return 20; default: return -1; } +} - return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); +static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) +{ + int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); + + div = vlv_gpu_freq_div(czclk_freq); + if (div < 0) + return div; + + return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div); } static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) { - int mul; + int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); - /* 4 x czclk */ - switch (dev_priv->mem_freq) { - case 800: - mul = 10; - break; - case 1066: - mul = 12; - break; - case 1333: - mul = 16; - break; - default: - return -1; - } + mul = vlv_gpu_freq_div(czclk_freq); + if (mul < 0) + return mul; - return DIV_ROUND_CLOSEST(4 * mul * 
val, dev_priv->mem_freq) + 0xbd - 6; + return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6; } static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) { - int div, freq; + int div, czclk_freq = dev_priv->rps.cz_freq; - switch (dev_priv->rps.cz_freq) { - case 200: - div = 5; - break; - case 267: - div = 6; - break; - case 320: - case 333: - case 400: - div = 8; - break; - default: - return -1; - } + div = vlv_gpu_freq_div(czclk_freq) / 2; + if (div < 0) + return div; - freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2); - - return freq; + return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; } static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) { - int mul, opcode; + int mul, czclk_freq = dev_priv->rps.cz_freq; - switch (dev_priv->rps.cz_freq) { - case 200: - mul = 5; - break; - case 267: - mul = 6; - break; - case 320: - case 333: - case 400: - mul = 8; - break; - default: - return -1; - } + mul = vlv_gpu_freq_div(czclk_freq) / 2; + if (mul < 0) + return mul; /* CHV needs even values */ - opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); - - return opcode; + return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; } int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c new file mode 100644 index 000000000000..716b8a961eea --- /dev/null +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -0,0 +1,481 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * DOC: Panel Self Refresh (PSR/SRD) + * + * Since Haswell the Display controller supports Panel Self-Refresh on display + * panels which have a remote frame buffer (RFB) implemented according to the PSR + * spec in eDP 1.3. The PSR feature allows the display to go to lower standby states + * when the system is idle but the display is on, as it eliminates display refresh + * requests to DDR memory completely as long as the frame buffer for that + * display is unchanged. + * + * Panel Self Refresh must be supported by both Hardware (source) and + * Panel (sink). + * + * PSR saves power by caching the framebuffer in the panel RFB, which allows us + * to power down the link and memory controller. For DSI panels the same idea + * is called "manual mode". + * + * The implementation uses the hardware-based PSR support which automatically + * enters/exits self-refresh mode. 
The hardware takes care of sending the + * required DP aux message and could even retrain the link (that part isn't + * enabled yet though). The hardware also keeps track of any frontbuffer + * changes to know when to exit self-refresh mode again. Unfortunately that + * part doesn't work too well, which is why the i915 PSR support uses the + * software frontbuffer tracking to make sure it doesn't miss a screen + * update. For this integration intel_psr_invalidate() and intel_psr_flush() + * get called by the frontbuffer tracking code. Note that because of locking + * issues the self-refresh re-enable code is done from a work queue, which + * must be correctly synchronized/cancelled when shutting down the pipe. + */ + +#include <drm/drmP.h> + +#include "intel_drv.h" +#include "i915_drv.h" + +static bool is_edp_psr(struct intel_dp *intel_dp) +{ + return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; +} + +bool intel_psr_is_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_PSR(dev)) + return false; + + return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; +} + +static void intel_psr_write_vsc(struct intel_dp *intel_dp, + struct edp_vsc_psr *vsc_psr) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); + u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); + uint32_t *data = (uint32_t *) vsc_psr; + unsigned int i; + + /* As per BSpec (Pipe Video Data Island Packet), we need to disable + the video DIP before programming the video DIP data buffer + registers for the DIP being updated. 
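
A hedged user-space sketch of the DIP write sequence this comment describes: disable the DIP, write the payload in 32-bit chunks with zero padding, then re-enable. The 36-byte buffer size and the simplified struct layout are assumptions; the header bytes match the eDP 1.3 values programmed in intel_psr_setup_vsc().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIP_VSC_DATA_SIZE 36		/* assumed VSC DIP buffer size */

struct vsc_sdp {			/* simplified edp_vsc_psr layout */
	uint8_t hb0, hb1, hb2, hb3;
	uint8_t db[28];			/* payload truncated for the demo */
};

static uint32_t dip_data[DIP_VSC_DATA_SIZE / 4];	/* VSC_DATA regs */

int main(void)
{
	struct vsc_sdp vsc;
	const uint8_t *p = (const uint8_t *)&vsc;
	unsigned int i;

	memset(&vsc, 0, sizeof(vsc));
	vsc.hb1 = 0x7;	/* VSC packet type */
	vsc.hb2 = 0x2;	/* revision, per eDP 1.3 Table 3.10 */
	vsc.hb3 = 0x8;	/* valid payload bytes */

	/* 1) I915_WRITE(ctl_reg, 0): disable the DIP before touching data */
	/* 2) write dwords, zero-padding past the end of the packet */
	for (i = 0; i < DIP_VSC_DATA_SIZE / 4; i++) {
		uint32_t dw = 0;

		if (i * 4 < sizeof(vsc))
			memcpy(&dw, p + i * 4, 4);
		dip_data[i] = dw;
	}
	/* 3) re-enable: I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW) */

	printf("dw0 = 0x%08x\n", dip_data[0]);	/* header dword */
	return 0;
}
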
*/ + I915_WRITE(ctl_reg, 0); + POSTING_READ(ctl_reg); + + for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { + if (i < sizeof(struct edp_vsc_psr)) + I915_WRITE(data_reg + i, *data++); + else + I915_WRITE(data_reg + i, 0); + } + + I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); + POSTING_READ(ctl_reg); +} + +static void intel_psr_setup_vsc(struct intel_dp *intel_dp) +{ + struct edp_vsc_psr psr_vsc; + + /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + intel_psr_write_vsc(intel_dp, &psr_vsc); +} + +static void intel_psr_enable_sink(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t aux_clock_divider; + int precharge = 0x3; + bool only_standby = false; + static const uint8_t aux_msg[] = { + [0] = DP_AUX_NATIVE_WRITE << 4, + [1] = DP_SET_POWER >> 8, + [2] = DP_SET_POWER & 0xff, + [3] = 1 - 1, + [4] = DP_SET_POWER_D0, + }; + int i; + + BUILD_BUG_ON(sizeof(aux_msg) > 20); + + aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); + + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; + + /* Enable PSR in sink */ + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + + /* Setup AUX registers */ + for (i = 0; i < sizeof(aux_msg); i += 4) + I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i, + intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); + + I915_WRITE(EDP_PSR_AUX_CTL(dev), + DP_AUX_CH_CTL_TIME_OUT_400us | + (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); +} + +static void intel_psr_enable_source(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t max_sleep_time = 0x1f; + uint32_t idle_frames = 1; + uint32_t val = 0x0; + const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + bool only_standby = false; + + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; + + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { + val |= EDP_PSR_LINK_STANDBY; + val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_SKIP_AUX_EXIT; + val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; + } else + val |= EDP_PSR_LINK_DISABLE; + + I915_WRITE(EDP_PSR_CTL(dev), val | + (IS_BROADWELL(dev) ? 
0 : link_entry_time) | + max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | + idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | + EDP_PSR_ENABLE); +} + +static bool intel_psr_match_conditions(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dig_port->base.base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + lockdep_assert_held(&dev_priv->psr.lock); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); + + dev_priv->psr.source_ok = false; + + if (IS_HASWELL(dev) && dig_port->port != PORT_A) { + DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); + return false; + } + + if (!i915.enable_psr) { + DRM_DEBUG_KMS("PSR disable by flag\n"); + return false; + } + + /* Below limitations aren't valid for Broadwell */ + if (IS_BROADWELL(dev)) + goto out; + + if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & + S3D_ENABLE) { + DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); + return false; + } + + if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); + return false; + } + + out: + dev_priv->psr.source_ok = true; + return true; +} + +static void intel_psr_do_enable(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); + WARN_ON(dev_priv->psr.active); + lockdep_assert_held(&dev_priv->psr.lock); + + /* Enable/Re-enable PSR on the host */ + intel_psr_enable_source(intel_dp); + + dev_priv->psr.active = true; +} + +/** + * intel_psr_enable - Enable PSR + * @intel_dp: Intel DP + * + * This function can only be called after the pipe is fully trained and enabled. + */ +void intel_psr_enable(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_PSR(dev)) { + DRM_DEBUG_KMS("PSR not supported on this platform\n"); + return; + } + + if (!is_edp_psr(intel_dp)) { + DRM_DEBUG_KMS("PSR not supported by this panel\n"); + return; + } + + mutex_lock(&dev_priv->psr.lock); + if (dev_priv->psr.enabled) { + DRM_DEBUG_KMS("PSR already in use\n"); + goto unlock; + } + + if (!intel_psr_match_conditions(intel_dp)) + goto unlock; + + dev_priv->psr.busy_frontbuffer_bits = 0; + + intel_psr_setup_vsc(intel_dp); + + /* Avoid continuous PSR exit by masking memup and hpd */ + I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); + + /* Enable PSR on the panel */ + intel_psr_enable_sink(intel_dp); + + dev_priv->psr.enabled = intel_dp; +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + +/** + * intel_psr_disable - Disable PSR + * @intel_dp: Intel DP + * + * This function needs to be called before disabling pipe. 
+ */ +void intel_psr_disable(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + + if (dev_priv->psr.active) { + I915_WRITE(EDP_PSR_CTL(dev), + I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); + + /* Wait till PSR is idle */ + if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & + EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) + DRM_ERROR("Timed out waiting for PSR Idle State\n"); + + dev_priv->psr.active = false; + } else { + WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); + } + + dev_priv->psr.enabled = NULL; + mutex_unlock(&dev_priv->psr.lock); + + cancel_delayed_work_sync(&dev_priv->psr.work); +} + +static void intel_psr_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), psr.work.work); + struct intel_dp *intel_dp = dev_priv->psr.enabled; + + /* We have to make sure PSR is ready for re-enable, + * otherwise it stays disabled until the next full enable/disable cycle. + * PSR might take some time to get fully disabled + * and be ready for re-enable. + */ + if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & + EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { + DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); + return; + } + + mutex_lock(&dev_priv->psr.lock); + intel_dp = dev_priv->psr.enabled; + + if (!intel_dp) + goto unlock; + + /* + * The delayed work can race with an invalidate, hence we need to + * recheck. Since psr_flush first clears this and then reschedules we + * won't ever miss a flush when bailing out here. + */ + if (dev_priv->psr.busy_frontbuffer_bits) + goto unlock; + + intel_psr_do_enable(intel_dp); +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + +static void intel_psr_exit(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->psr.active) { + u32 val = I915_READ(EDP_PSR_CTL(dev)); + + WARN_ON(!(val & EDP_PSR_ENABLE)); + + I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); + + dev_priv->psr.active = false; + } + +} + +/** + * intel_psr_invalidate - Invalidate PSR + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * Since the hardware frontbuffer tracking has gaps we need to integrate + * with the software frontbuffer tracking. This function gets called every + * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be + * disabled if the frontbuffer mask contains a buffer relevant to PSR. + * + * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. 
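
The invalidate/flush contract documented here reduces to simple bitmask bookkeeping; a toy model of it (the plane bits are arbitrary, and the work queue is stubbed with a printf):

#include <stdbool.h>
#include <stdio.h>

#define FB_PRIMARY	(1u << 0)	/* illustrative plane bits */
#define FB_SPRITE	(1u << 1)

static unsigned int busy_frontbuffer_bits;
static bool psr_active = true;

static void psr_invalidate(unsigned int bits)
{
	psr_active = false;			/* intel_psr_exit() */
	busy_frontbuffer_bits |= bits;
}

static void psr_flush(unsigned int bits)
{
	busy_frontbuffer_bits &= ~bits;
	if (!psr_active && !busy_frontbuffer_bits)
		puts("schedule delayed re-enable");	/* psr.work, ~100 ms */
}

int main(void)
{
	psr_invalidate(FB_PRIMARY | FB_SPRITE);
	psr_flush(FB_PRIMARY);		/* sprite still dirty: no re-enable */
	psr_flush(FB_SPRITE);		/* all clean: re-enable scheduled */
	return 0;
}
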
+ */ +void intel_psr_invalidate(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + enum pipe pipe; + + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + + crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + intel_psr_exit(dev); + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + + dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; + mutex_unlock(&dev_priv->psr.lock); +} + +/** + * intel_psr_flush - Flush PSR + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * Since the hardware frontbuffer tracking has gaps we need to integrate + * with the software frontbuffer tracking. This function gets called every + * time frontbuffer rendering has completed and flushed out to memory. PSR + * can be enabled again if no other frontbuffer relevant to PSR is dirty. + * + * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. + */ +void intel_psr_flush(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + enum pipe pipe; + + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + + crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* + * On Haswell sprite plane updates don't result in a psr invalidating + * signal in the hardware. Which means we need to manually fake this in + * software for all flushes, not just when we've seen a preceding + * invalidation through frontbuffer rendering. + */ + if (IS_HASWELL(dev) && + (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) + intel_psr_exit(dev); + + if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); + mutex_unlock(&dev_priv->psr.lock); +} + +/** + * intel_psr_init - Init basic PSR work and mutex. + * @dev: DRM device + * + * This function is called only once at driver load to initialize basic + * PSR stuff. + */ +void intel_psr_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); + mutex_init(&dev_priv->psr.lock); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f457146ff6a4..c7bc93d28d84 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; /* * TLB invalidate requires a post-sync write. */ flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; + /* Workaround: we must issue a pipe_control with CS-stall bit * set before a pipe_control command that has the state cache * invalidate bit set. 
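
The driver implements this workaround by emitting a dummy PIPE_CONTROL ahead of the real one. Below is a user-space sketch modeled on the gen7 CS-stall helper; the command encoding and bit positions are written out as assumptions rather than copied from i915_reg.h.

#include <stdint.h>
#include <stdio.h>

/* assumed encodings, modeled on i915's PIPE_CONTROL definitions */
#define GFX_OP_PIPE_CONTROL(len)		(0x7a000000 | ((len) - 2))
#define PIPE_CONTROL_CS_STALL			(1u << 20)
#define PIPE_CONTROL_STALL_AT_SCOREBOARD	(1u << 1)

static uint32_t ring[8];
static int tail;

static void ring_emit(uint32_t dw)
{
	ring[tail++] = dw;
}

int main(void)
{
	/* dummy PIPE_CONTROL: stall the CS before the invalidating flush */
	ring_emit(GFX_OP_PIPE_CONTROL(4));
	ring_emit(PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD);
	ring_emit(0);	/* scratch address */
	ring_emit(0);	/* immediate data */

	/* ...the PIPE_CONTROL carrying the state-cache invalidate follows */
	for (int i = 0; i < tail; i++)
		printf("dw%d: 0x%08x\n", i, ring[i]);
	return 0;
}
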
*/ @@ -589,14 +592,10 @@ static int init_ring_common(struct intel_engine_cs *ring) goto out; } - if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) - i915_kernel_lost_context(ring->dev); - else { - ringbuf->head = I915_READ_HEAD(ring); - ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ringbuf->space = intel_ring_space(ringbuf); - ringbuf->last_retired_head = -1; - } + ringbuf->head = I915_READ_HEAD(ring); + ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; + ringbuf->space = intel_ring_space(ringbuf); + ringbuf->last_retired_head = -1; memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); @@ -665,7 +664,8 @@ err: return ret; } -static int intel_ring_workarounds_emit(struct intel_engine_cs *ring) +static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, + struct intel_context *ctx) { int ret, i; struct drm_device *dev = ring->dev; @@ -704,7 +704,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring) } static int wa_add(struct drm_i915_private *dev_priv, - const u32 addr, const u32 val, const u32 mask) + const u32 addr, const u32 mask, const u32 val) { const u32 idx = dev_priv->workarounds.count; @@ -720,22 +720,25 @@ static int wa_add(struct drm_i915_private *dev_priv, return 0; } -#define WA_REG(addr, val, mask) { \ - const int r = wa_add(dev_priv, (addr), (val), (mask)); \ +#define WA_REG(addr, mask, val) { \ + const int r = wa_add(dev_priv, (addr), (mask), (val)); \ if (r) \ return r; \ } #define WA_SET_BIT_MASKED(addr, mask) \ - WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff) + WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) #define WA_CLR_BIT_MASKED(addr, mask) \ - WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff) + WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask)) -#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask) -#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask) +#define WA_SET_FIELD_MASKED(addr, mask, value) \ + WA_REG(addr, mask, _MASKED_FIELD(mask, value)) -#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff) +#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask)) +#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask)) + +#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) static int bdw_init_workarounds(struct intel_engine_cs *ring) { @@ -776,8 +779,9 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) * disable bit, which we don't touch here, but it's good * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). */ - WA_SET_BIT_MASKED(GEN7_GT_MODE, - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + WA_SET_FIELD_MASKED(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK, + GEN6_WIZ_HASHING_16x4); return 0; } @@ -788,25 +792,25 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) struct drm_i915_private *dev_priv = dev->dev_private; /* WaDisablePartialInstShootdown:chv */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, - PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* WaDisableThreadStallDopClockGating:chv */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, - STALL_DOP_GATING_DISABLE); - - /* WaDisableDopClockGating:chv (pre-production hw) */ - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, - DOP_CLOCK_GATING_DISABLE); + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | + STALL_DOP_GATING_DISABLE); - /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN8_SAMPLER_POWER_BYPASS_DIS); + /* Use Force Non-Coherent whenever executing a 3D context. 
This is a + * workaround for a possible hang in the unlikely event a TLB + * invalidation occurs during a PSD flush. + */ + /* WaForceEnableNonCoherent:chv */ + /* WaHdcDisableFetchWhenMasked:chv */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT | + HDC_DONOT_FETCH_MEM_WHEN_MASKED); return 0; } -static int init_workarounds_ring(struct intel_engine_cs *ring) +int init_workarounds_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1721,13 +1725,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring) return 0; } -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { - if (!ringbuf->obj) - return; - iounmap(ringbuf->virtual_start); + ringbuf->virtual_start = NULL; i915_gem_object_ggtt_unpin(ringbuf->obj); +} + +int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_object *obj = ringbuf->obj; + int ret; + + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); + if (ret) + return ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) { + i915_gem_object_ggtt_unpin(obj); + return ret; + } + + ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + + i915_gem_obj_ggtt_offset(obj), ringbuf->size); + if (ringbuf->virtual_start == NULL) { + i915_gem_object_ggtt_unpin(obj); + return -EINVAL; + } + + return 0; +} + +void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +{ drm_gem_object_unreference(&ringbuf->obj->base); ringbuf->obj = NULL; } @@ -1735,12 +1768,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) int intel_alloc_ringbuffer_obj(struct drm_device *dev, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - int ret; - - if (ringbuf->obj) - return 0; obj = NULL; if (!HAS_LLC(dev)) @@ -1753,30 +1781,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev, /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); - if (ret) - goto err_unref; - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto err_unpin; - - ringbuf->virtual_start = - ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), - ringbuf->size); - if (ringbuf->virtual_start == NULL) { - ret = -EINVAL; - goto err_unpin; - } - ringbuf->obj = obj; - return 0; -err_unpin: - i915_gem_object_ggtt_unpin(obj); -err_unref: - drm_gem_object_unreference(&obj->base); - return ret; + return 0; } static int intel_init_ring_buffer(struct drm_device *dev, @@ -1813,10 +1820,21 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_alloc_ringbuffer_obj(dev, ringbuf); - if (ret) { - DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); - goto error; + if (ringbuf->obj == NULL) { + ret = intel_alloc_ringbuffer_obj(dev, ringbuf); + if (ret) { + DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", + ring->name, ret); + goto error; + } + + ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + if (ret) { + DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", + ring->name, ret); + intel_destroy_ringbuffer_obj(ringbuf); + goto error; + } } /* Workaround an erratum on the i830 which causes a hang if @@ -1857,6 +1875,7 @@ void 
intel_cleanup_ring_buffer(struct intel_engine_cs *ring) intel_stop_ring_buffer(ring); WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); + intel_unpin_ringbuffer_obj(ringbuf); intel_destroy_ringbuffer_obj(ringbuf); ring->preallocated_lazy_request = NULL; ring->outstanding_lazy_seqno = 0; @@ -1942,13 +1961,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) break; } - if (!drm_core_check_feature(dev, DRIVER_MODESET) && - dev->primary->master) { - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - } - msleep(1); if (dev_priv->mm.interruptible && signal_pending(current)) { @@ -2439,91 +2451,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, ring); } -int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; - struct intel_ringbuffer *ringbuf = ring->buffer; - int ret; - - if (ringbuf == NULL) { - ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); - if (!ringbuf) - return -ENOMEM; - ring->buffer = ringbuf; - } - - ring->name = "render ring"; - ring->id = RCS; - ring->mmio_base = RENDER_RING_BASE; - - if (INTEL_INFO(dev)->gen >= 6) { - /* non-kms not supported on gen6+ */ - ret = -ENODEV; - goto err_ringbuf; - } - - /* Note: gem is not supported on gen5/ilk without kms (the corresponding - * gem_init ioctl returns with -ENODEV). Hence we do not need to set up - * the special gen5 functions. */ - ring->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) - ring->flush = gen2_render_ring_flush; - else - ring->flush = gen4_render_ring_flush; - ring->get_seqno = ring_get_seqno; - ring->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { - ring->irq_get = i8xx_ring_get_irq; - ring->irq_put = i8xx_ring_put_irq; - } else { - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; - } - ring->irq_enable_mask = I915_USER_INTERRUPT; - ring->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 4) - ring->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) - ring->dispatch_execbuffer = i830_dispatch_execbuffer; - else - ring->dispatch_execbuffer = i915_dispatch_execbuffer; - ring->init = init_render_ring; - ring->cleanup = render_ring_cleanup; - - ring->dev = dev; - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); - - ringbuf->size = size; - ringbuf->effective_size = ringbuf->size; - if (IS_I830(ring->dev) || IS_845G(ring->dev)) - ringbuf->effective_size -= 2 * CACHELINE_BYTES; - - ringbuf->virtual_start = ioremap_wc(start, size); - if (ringbuf->virtual_start == NULL) { - DRM_ERROR("can not ioremap virtual address for" - " ring buffer\n"); - ret = -ENOMEM; - goto err_ringbuf; - } - - if (!I915_NEED_GFX_HWS(dev)) { - ret = init_phys_status_page(ring); - if (ret) - goto err_vstart; - } - - return 0; - -err_vstart: - iounmap(ringbuf->virtual_start); -err_ringbuf: - kfree(ringbuf); - ring->buffer = NULL; - return ret; -} - int intel_init_bsd_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 96479c89f4bd..fe426cff598b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -148,7 +148,8 @@ struct 
intel_engine_cs { int (*init)(struct intel_engine_cs *ring); - int (*init_context)(struct intel_engine_cs *ring); + int (*init_context)(struct intel_engine_cs *ring, + struct intel_context *ctx); void (*write_tail)(struct intel_engine_cs *ring, u32 value); @@ -235,6 +236,7 @@ struct intel_engine_cs { /* Execlists */ spinlock_t execlist_lock; struct list_head execlist_queue; + struct list_head execlist_retired_req_list; u8 next_context_status_buffer; u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ int (*emit_request)(struct intel_ringbuffer *ringbuf); @@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring, #define I915_GEM_HWS_SCRATCH_INDEX 0x30 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); +int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf); void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); int intel_alloc_ringbuffer_obj(struct drm_device *dev, struct intel_ringbuffer *ringbuf); @@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev); u64 intel_ring_get_active_head(struct intel_engine_cs *ring); void intel_ring_setup_status_page(struct intel_engine_cs *ring); +int init_workarounds_ring(struct intel_engine_cs *ring); + static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) { return ringbuf->tail; @@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno) ring->trace_irq_seqno = seqno; } -/* DRI warts */ -int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); - #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index dcbecffc6b5f..ac6da7102fbb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -577,6 +577,23 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, power_well->data != PIPE_C); chv_set_pipe_power_well(dev_priv, power_well, true); + + if (power_well->data == PIPE_A) { + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization/resume we can avoid restoring the + * part of the HW/SW state that will be inited anyway explicitly. 
+ */ + if (dev_priv->power_domains.initializing) + return; + + intel_hpd_init(dev_priv); + + i915_redisable_vga_power_on(dev_priv->dev); + } } static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, @@ -586,35 +603,18 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, power_well->data != PIPE_B && power_well->data != PIPE_C); + if (power_well->data == PIPE_A) { + spin_lock_irq(&dev_priv->irq_lock); + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + } + chv_set_pipe_power_well(dev_priv, power_well, false); if (power_well->data == PIPE_A) vlv_power_sequencer_reset(dev_priv); } -static void check_power_well_state(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); - - if (power_well->always_on || !i915.disable_power_well) { - if (!enabled) - goto mismatch; - - return; - } - - if (enabled != (power_well->count > 0)) - goto mismatch; - - return; - -mismatch: - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", - power_well->name, power_well->always_on, enabled, - power_well->count, i915.disable_power_well); -} - /** * intel_display_power_get - grab a power domain reference * @dev_priv: i915 device instance @@ -646,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, power_well->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } - - check_power_well_state(dev_priv, power_well); } power_domains->domain_use_count[domain]++; @@ -686,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, power_well->hw_enabled = false; power_well->ops->disable(dev_priv, power_well); } - - check_power_well_state(dev_priv, power_well); } mutex_unlock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 64076555153a..7d9c340f7693 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1264,10 +1264,11 @@ intel_prepare_sprite_plane(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct drm_crtc *crtc = state->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane *intel_plane = to_intel_plane(plane); enum pipe pipe = intel_crtc->pipe; struct drm_framebuffer *fb = state->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); + struct drm_i915_gem_object *old_obj = intel_plane->obj; int ret; if (old_obj != obj) { @@ -1302,7 +1303,7 @@ intel_commit_sprite_plane(struct drm_plane *plane, enum pipe pipe = intel_crtc->pipe; struct drm_framebuffer *fb = state->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); + struct drm_i915_gem_object *old_obj = intel_plane->obj; int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; uint32_t src_x, src_y, src_w, src_h; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 6a0c3fb2cbf0..46de8d75b4bf 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -43,23 +43,17 @@ static void assert_device_not_suspended(struct drm_i915_private *dev_priv) { - WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, - "Device suspended\n"); + WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, + "Device suspended\n"); } static void 
__gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) { - u32 gt_thread_status_mask; - - if (IS_HASWELL(dev_priv->dev)) - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; - else - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; - /* w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. */ - if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & + GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) DRM_ERROR("GT thread status wait timed out\n"); } @@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); /* WaRsForcewakeWaitTC0:ivb,hsw */ - if (INTEL_INFO(dev_priv->dev)->gen < 8) - __gen6_gt_wait_for_thread_c0(dev_priv); + __gen6_gt_wait_for_thread_c0(dev_priv); } static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) @@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out: waiting for media to ack.\n"); } - - /* WaRsForcewakeWaitTC0:vlv */ - if (!IS_CHERRYVIEW(dev_priv->dev)) - __gen6_gt_wait_for_thread_c0(dev_priv); } static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, @@ -681,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) REG_RANGE((reg), 0x14000, 0x14400) || \ REG_RANGE((reg), 0x22000, 0x24000)) +#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ + REG_RANGE((reg), 0xB00, 0x2000) + +#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x2000, 0x2700) || \ + REG_RANGE((reg), 0x3000, 0x4000) || \ + REG_RANGE((reg), 0x5200, 0x8000) || \ + REG_RANGE((reg), 0x8140, 0x8160) || \ + REG_RANGE((reg), 0x8300, 0x8500) || \ + REG_RANGE((reg), 0x8C00, 0x8D00) || \ + REG_RANGE((reg), 0xB000, 0xB480) || \ + REG_RANGE((reg), 0xE000, 0xE900) || \ + REG_RANGE((reg), 0x24400, 0x24800)) + +#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x8130, 0x8140) || \ + REG_RANGE((reg), 0x8800, 0x8A00) || \ + REG_RANGE((reg), 0xD000, 0xD800) || \ + REG_RANGE((reg), 0x12000, 0x14000) || \ + REG_RANGE((reg), 0x1A000, 0x1EA00) || \ + REG_RANGE((reg), 0x30000, 0x40000)) + +#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \ + REG_RANGE((reg), 0x9400, 0x9800) + +#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \ + ((reg) < 0x40000 &&\ + !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \ + !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \ + !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ + !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) + static void ilk_dummy_write(struct drm_i915_private *dev_priv) { @@ -811,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ REG_READ_FOOTER; \ } +#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \ + ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) + +#define __gen9_read(x) \ +static u##x \ +gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + REG_READ_HEADER(x); \ + if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + val = __raw_i915_read##x(dev_priv, reg); \ + } else { \ + unsigned fwengine = 0; \ + if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if 
(FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } else { \ + if (dev_priv->uncore.fw_blittercount == 0) \ + fwengine = FORCEWAKE_BLITTER; \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + val = __raw_i915_read##x(dev_priv, reg); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ + } \ + REG_READ_FOOTER; \ +} + +__gen9_read(8) +__gen9_read(16) +__gen9_read(32) +__gen9_read(64) __chv_read(8) __chv_read(16) __chv_read(32) @@ -832,6 +892,7 @@ __gen4_read(16) __gen4_read(32) __gen4_read(64) +#undef __gen9_read #undef __chv_read #undef __vlv_read #undef __gen6_read @@ -969,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) REG_WRITE_FOOTER; \ } +static const u32 gen9_shadowed_regs[] = { + RING_TAIL(RENDER_RING_BASE), + RING_TAIL(GEN6_BSD_RING_BASE), + RING_TAIL(VEBOX_RING_BASE), + RING_TAIL(BLT_RING_BASE), + FORCEWAKE_BLITTER_GEN9, + FORCEWAKE_RENDER_GEN9, + FORCEWAKE_MEDIA_GEN9, + GEN6_RPNSWREQ, + GEN6_RC_VIDEO_FREQ, + /* TODO: Other registers are not yet used */ +}; + +static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) + if (reg == gen9_shadowed_regs[i]) + return true; + + return false; +} + +#define __gen9_write(x) \ +static void \ +gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ + bool trace) { \ + REG_WRITE_HEADER; \ + if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ + is_gen9_shadowed(dev_priv, reg)) { \ + __raw_i915_write##x(dev_priv, reg, val); \ + } else { \ + unsigned fwengine = 0; \ + if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } else { \ + if (dev_priv->uncore.fw_blittercount == 0) \ + fwengine = FORCEWAKE_BLITTER; \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, \ + fwengine); \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, \ + fwengine); \ + } \ + REG_WRITE_FOOTER; \ +} + +__gen9_write(8) +__gen9_write(16) +__gen9_write(32) +__gen9_write(64) __chv_write(8) __chv_write(16) __chv_write(32) @@ -994,6 +1118,7 @@ __gen4_write(16) __gen4_write(32) __gen4_write(64) +#undef __gen9_write #undef __chv_write #undef __gen8_write #undef __hsw_write @@ -1077,6 +1202,13 @@ void intel_uncore_init(struct drm_device *dev) switch (INTEL_INFO(dev)->gen) { default: + WARN_ON(1); + return; + case 9: + ASSIGN_WRITE_MMIO_VFUNCS(gen9); + ASSIGN_READ_MMIO_VFUNCS(gen9); + break; + case 8: if (IS_CHERRYVIEW(dev)) { ASSIGN_WRITE_MMIO_VFUNCS(chv); ASSIGN_READ_MMIO_VFUNCS(chv); @@ -1217,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, return 0; } -static int i965_reset_complete(struct drm_device *dev) +static int i915_reset_complete(struct drm_device *dev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - return (gdrst & GRDOM_RESET_ENABLE) == 0; + 
pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i965_do_reset(struct drm_device *dev) +static int i915_do_reset(struct drm_device *dev) { - int ret; - - /* FIXME: i965g/gm need a display save/restore for gpu reset. */ - return -ENODEV; - - /* - * Set the domains we want to reset (GRDOM/bits 2 and 3) as - * well as the reset bit (GR/bit 0). Setting the GR bit - * triggers the reset; when done, the hardware will clear it. - */ - pci_write_config_byte(dev->pdev, I965_GDRST, - GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(i965_reset_complete(dev), 500); - if (ret) - return ret; + /* assert reset for at least 20 usec */ + pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + udelay(20); + pci_write_config_byte(dev->pdev, I915_GDRST, 0); - pci_write_config_byte(dev->pdev, I965_GDRST, - GRDOM_MEDIA | GRDOM_RESET_ENABLE); - - ret = wait_for(i965_reset_complete(dev), 500); - if (ret) - return ret; + return wait_for(i915_reset_complete(dev), 500); +} - pci_write_config_byte(dev->pdev, I965_GDRST, 0); +static int g4x_reset_complete(struct drm_device *dev) +{ + u8 gdrst; + pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} - return 0; +static int g33_do_reset(struct drm_device *dev) +{ + pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(dev), 500); } static int g4x_do_reset(struct drm_device *dev) @@ -1259,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - pci_write_config_byte(dev->pdev, I965_GDRST, + pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(i965_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(dev), 500); if (ret) return ret; @@ -1269,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I965_GDRST, + pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(i965_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(dev), 500); if (ret) return ret; @@ -1279,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I965_GDRST, 0); + pci_write_config_byte(dev->pdev, I915_GDRST, 0); return 0; } @@ -1337,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev) return ironlake_do_reset(dev); else if (IS_G4X(dev)) return g4x_do_reset(dev); - else if (IS_GEN4(dev)) - return i965_do_reset(dev); + else if (IS_G33(dev)) + return g33_do_reset(dev); + else if (INTEL_INFO(dev)->gen >= 3) + return i915_do_reset(dev); else return -ENODEV; } diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 82fb758a29bc..ab31848e92cf 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -6,6 +6,7 @@ config DRM_IMX select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) + depends on IMX_IPUV3_CORE help enable i.MX graphics support @@ -40,11 +41,11 @@ config DRM_IMX_LDB found on i.MX53 and i.MX6 processors. 
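The reset rework in the hunks above replaces the stubbed i965 path with per-generation helpers that drive the GDRST register through PCI configuration space: assert the reset-enable bit, hold it for at least 20 usec, release it, then poll until the hardware reports completion. A minimal standalone sketch of that handshake follows, assuming illustrative offsets and bit values (EX_*) rather than the driver's real I915_GDRST/GRDOM_* definitions:

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical names; the driver uses I915_GDRST and GRDOM_* bits. */
#define EX_GDRST		0xc0
#define EX_RESET_ENABLE		(1 << 0)
#define EX_RESET_STATUS		(1 << 1)

static int example_gdrst_reset(struct pci_dev *pdev)
{
	unsigned int timeout_ms = 500;
	u8 gdrst;

	/* assert reset for at least 20 usec, then deassert it */
	pci_write_config_byte(pdev, EX_GDRST, EX_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, EX_GDRST, 0);

	/* poll the status bit, like wait_for(i915_reset_complete(dev), 500) */
	while (timeout_ms--) {
		pci_read_config_byte(pdev, EX_GDRST, &gdrst);
		if ((gdrst & EX_RESET_STATUS) == 0)
			return 0;
		mdelay(1);
	}
	return -ETIMEDOUT;
}

The per-generation split in intel_gpu_reset() then keeps the render/media GRDOM sequence for G4X, has G33 set the enable bit and wait for the hardware to clear it, and falls back to the plain handshake above for the remaining gen3+ parts.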
config DRM_IMX_IPUV3 - tristate "DRM Support for i.MX IPUv3" + tristate depends on DRM_IMX depends on IMX_IPUV3_CORE - help - Choose this if you have a i.MX5 or i.MX6 processor. + default y if DRM_IMX=y + default m if DRM_IMX=m config DRM_IMX_HDMI tristate "Freescale i.MX DRM HDMI" diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 2f8007241734..b250130debc8 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -30,8 +30,6 @@ #define MAX_CRTC 4 -struct imx_drm_crtc; - struct imx_drm_component { struct device_node *of_node; struct list_head list; @@ -633,7 +631,8 @@ static int imx_drm_platform_probe(struct platform_device *pdev) continue; } - component_match_add(&pdev->dev, &match, compare_of, remote); + component_match_add(&pdev->dev, &match, compare_of, + remote); of_node_put(remote); } of_node_put(port); @@ -692,7 +691,6 @@ static struct platform_driver imx_drm_pdrv = { .probe = imx_drm_platform_probe, .remove = imx_drm_platform_remove, .driver = { - .owner = THIS_MODULE, .name = "imx-drm", .pm = &imx_drm_pm_ops, .of_match_table = imx_drm_dt_ids, diff --git a/drivers/gpu/drm/imx/imx-hdmi.c b/drivers/gpu/drm/imx/imx-hdmi.c index aaec6b2cdf56..ddc53e039530 100644 --- a/drivers/gpu/drm/imx/imx-hdmi.c +++ b/drivers/gpu/drm/imx/imx-hdmi.c @@ -1754,7 +1754,6 @@ static struct platform_driver imx_hdmi_driver = { .remove = imx_hdmi_platform_remove, .driver = { .name = "imx-hdmi", - .owner = THIS_MODULE, .of_match_table = imx_hdmi_dt_ids, }, }; diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 4662e00b456a..c60460043e24 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -11,11 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. */ #include <linux/module.h> @@ -604,7 +599,6 @@ static struct platform_driver imx_ldb_driver = { .driver = { .of_match_table = imx_ldb_dt_ids, .name = DRIVER_NAME, - .owner = THIS_MODULE, }, }; diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 42c651be6c20..a729f4f7074c 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -11,11 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. 
*/ #include <linux/clk.h> @@ -665,7 +660,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val); if (ret < 0) { - dev_err(dev, "failed to read configuration register: %d\n", ret); + dev_err(dev, "failed to read configuration register: %d\n", + ret); return ret; } if (val != 0x00100000) { @@ -724,7 +720,6 @@ static struct platform_driver imx_tve_driver = { .driver = { .of_match_table = imx_tve_dt_ids, .name = "imx-tve", - .owner = THIS_MODULE, }, }; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 11e84a251773..ebee59cb96d8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -11,11 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. */ #include <linux/component.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 944962b692bb..6987e16fe99b 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, { struct drm_gem_cma_object *cma_obj; unsigned long eba; + int active; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); if (!cma_obj) { @@ -74,12 +75,17 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", &cma_obj->paddr, x, y); - ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); - eba = cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; - ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); - ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); + + if (ipu_plane->enabled) { + active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); + ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); + ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); + } else { + ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); + ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); + } /* cache offsets for subsequent pageflips */ ipu_plane->x = x; @@ -137,6 +143,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, if (crtc_h < 2) return -EINVAL; + /* + * since we cannot touch active IDMAC channels, we do not support + * resizing the enabled plane or changing its format + */ + if (ipu_plane->enabled) { + if (src_w != ipu_plane->w || src_h != ipu_plane->h || + fb->pixel_format != ipu_plane->base.fb->pixel_format) + return -EINVAL; + + return ipu_plane_set_base(ipu_plane, fb, src_x, src_y); + } + switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: ret = ipu_dp_setup_channel(ipu_plane->dp, @@ -148,14 +166,22 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, ret); return ret; } - ipu_dp_set_global_alpha(ipu_plane->dp, 1, 0, 1); + ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); break; case IPU_DP_FLOW_SYNC_FG: ipu_dp_setup_channel(ipu_plane->dp, ipu_drm_fourcc_to_colorspace(fb->pixel_format), IPUV3_COLORSPACE_UNKNOWN); ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y); - break; + /* Enable local alpha on partial plane */ + switch (fb->pixel_format) { 
+ case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); + break; + default: + break; + } } ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w); @@ -181,11 +207,16 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, return ret; } ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); + ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); + ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y); if (ret < 0) return ret; + ipu_plane->w = src_w; + ipu_plane->h = src_h; + return 0; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index c0aae5bcb5d4..af125fb40ef5 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -26,6 +26,8 @@ struct ipu_plane { int x; int y; + int w; + int h; bool enabled; }; diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 015a454b87e1..796c3c1c170a 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -11,11 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. */ #include <linux/component.h> @@ -128,6 +123,10 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder) static void imx_pd_encoder_commit(struct drm_encoder *encoder) { + struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); + + drm_panel_prepare(imxpd->panel); + drm_panel_enable(imxpd->panel); } static void imx_pd_encoder_mode_set(struct drm_encoder *encoder, @@ -138,6 +137,10 @@ static void imx_pd_encoder_mode_set(struct drm_encoder *encoder, static void imx_pd_encoder_disable(struct drm_encoder *encoder) { + struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); + + drm_panel_disable(imxpd->panel); + drm_panel_unprepare(imxpd->panel); } static struct drm_connector_funcs imx_pd_connector_funcs = { @@ -284,7 +287,6 @@ static struct platform_driver imx_pd_driver = { .driver = { .of_match_table = imx_pd_dt_ids, .name = "imx-parallel-display", - .owner = THIS_MODULE, }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index aa873048308b..94a5bee69fe7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu) msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); drm_gem_object_unreference(gpu->memptrs_bo); } - if (gpu->pm4) - release_firmware(gpu->pm4); - if (gpu->pfp) - release_firmware(gpu->pfp); + release_firmware(gpu->pm4); + release_firmware(gpu->pfp); msm_gpu_cleanup(&gpu->base); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index fbebb0405d76..b4e70e0e3cfa 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) uint32_t hpd_ctrl; int i, ret; + for (i = 0; i < config->hpd_reg_cnt; i++) { + ret = regulator_enable(hdmi->hpd_regs[i]); + if (ret) { + dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", + 
config->hpd_reg_names[i], ret); + goto fail; + } + } + ret = gpio_config(hdmi, true); if (ret) { dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); @@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) } } - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_enable(hdmi->hpd_regs[i]); - if (ret) { - dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } - } - hdmi_set_mode(hdmi, false); phy->funcs->reset(phy); hdmi_set_mode(hdmi, true); @@ -200,7 +200,7 @@ fail: return ret; } -static int hdp_disable(struct hdmi_connector *hdmi_connector) +static void hdp_disable(struct hdmi_connector *hdmi_connector) { struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; @@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector) hdmi_set_mode(hdmi, false); - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_disable(hdmi->hpd_regs[i]); - if (ret) { - dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } - } - for (i = 0; i < config->hpd_clk_cnt; i++) clk_disable_unprepare(hdmi->hpd_clks[i]); ret = gpio_config(hdmi, false); - if (ret) { - dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); - goto fail; - } - - return 0; + if (ret) + dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); -fail: - return ret; + for (i = 0; i < config->hpd_reg_cnt; i++) { + ret = regulator_disable(hdmi->hpd_regs[i]); + if (ret) + dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n", + config->hpd_reg_names[i], ret); + } } static void @@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector) (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) { bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED); - DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); - - /* ack the irq: */ + /* ack & disable (temporarily) HPD events: */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, - hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); + HDMI_HPD_INT_CTRL_INT_ACK); + + DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); /* detect disconnect if we are connected or visa versa: */ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 6781aa994613..3449213f1e76 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -84,7 +84,7 @@ static void crtc_flush(struct drm_crtc *crtc) struct drm_plane *plane; uint32_t flush = 0; - for_each_plane_on_crtc(crtc, plane) { + drm_atomic_crtc_for_each_plane(plane, crtc) { enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); flush |= pipe2flush(pipe_id); } @@ -197,7 +197,7 @@ static void setup_mixer(struct mdp4_kms *mdp4_kms) struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_plane *plane; - for_each_plane_on_crtc(crtc, plane) { + drm_atomic_crtc_for_each_plane(plane, crtc) { enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); int idx = idxs[pipe_id]; mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, @@ -221,7 +221,7 @@ static void blend_setup(struct drm_crtc *crtc) mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); - for_each_plane_on_crtc(crtc, plane) { + drm_atomic_crtc_for_each_plane(plane, crtc) { enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); int idx = idxs[pipe_id]; if (idx > 0) { @@ -331,17 
+331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_device *dev = crtc->dev; - DBG("%s: check", mdp4_crtc->name); - - if (mdp4_crtc->event) { - dev_err(dev->dev, "already pending flip!\n"); - return -EBUSY; - } - // TODO anything else to check? - return 0; } @@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; unsigned long flags; - DBG("%s: flush", mdp4_crtc->name); + DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event); WARN_ON(mdp4_crtc->event); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 76d0a40c7138..1e5ebe83647d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -107,7 +107,8 @@ static int mdp4_plane_atomic_check(struct drm_plane *plane, return 0; } -static void mdp4_plane_atomic_update(struct drm_plane *plane) +static void mdp4_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct drm_plane_state *state = plane->state; int ret; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 0598bdea4ff4..f021f960a8a2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -91,7 +91,7 @@ static void crtc_flush_all(struct drm_crtc *crtc) if (!mdp5_crtc->ctl) return; - for_each_plane_on_crtc(crtc, plane) { + drm_atomic_crtc_for_each_plane(plane, crtc) { flush_mask |= mdp5_plane_get_flush(plane); } flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); @@ -124,8 +124,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) } spin_unlock_irqrestore(&dev->event_lock, flags); - for_each_plane_on_crtc(crtc, plane) + drm_atomic_crtc_for_each_plane(plane, crtc) { mdp5_plane_complete_flip(plane); + } } static void mdp5_crtc_destroy(struct drm_crtc *crtc) @@ -195,7 +196,7 @@ static void blend_setup(struct drm_crtc *crtc) if (!mdp5_crtc->ctl) goto out; - for_each_plane_on_crtc(crtc, plane) { + drm_atomic_crtc_for_each_plane(plane, crtc) { enum mdp_mixer_stage_id stage = to_mdp5_plane_state(plane->state)->stage; @@ -302,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, DBG("%s: check", mdp5_crtc->name); - if (mdp5_crtc->event) { - dev_err(dev->dev, "already pending flip!\n"); - return -EBUSY; - } - /* request a free CTL, if none is already allocated for this CRTC */ if (state->enable && !mdp5_crtc->ctl) { mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); @@ -317,7 +313,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ - for_each_pending_plane_on_crtc(state->state, crtc, plane) { + drm_atomic_crtc_state_for_each_plane(plane, state) { struct drm_plane_state *pstate; if (cnt >= ARRAY_SIZE(pstates)) { @@ -363,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; unsigned long flags; - DBG("%s: flush", mdp5_crtc->name); + DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event); WARN_ON(mdp5_crtc->event); @@ -459,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, /* now that we know what irq's we want: */ mdp5_crtc->err.irqmask = intf2err(intf); mdp5_crtc->vblank.irqmask = intf2vblank(intf); - - /* when called from modeset_init(), skip the rest until 
later: */ - if (!mdp5_kms) - return; + mdp_irq_update(&mdp5_kms->base); spin_lock_irqsave(&mdp5_kms->resource_lock, flags); intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index a11f1b80c488..9f01a4f21af2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) goto fail; } - /* NOTE: the vsync and error irq's are actually associated with - * the INTF/encoder.. the easiest way to deal with this (ie. what - * we do now) is assume a fixed relationship between crtc's and - * encoders. I'm not sure if there is ever a need to more freely - * assign crtcs to encoders, but if there is then we need to take - * care of error and vblank irq's that the crtc has registered, - * and also update user-requested vblank_mask. - */ - encoder->possible_crtcs = BIT(0); - mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI); - + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; priv->encoders[priv->num_encoders++] = encoder; /* Construct bridge/connector for HDMI: */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 533df7caa310..26e5fdea6594 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -213,7 +213,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return 0; } -static void mdp5_plane_atomic_update(struct drm_plane *plane) +static void mdp5_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *state = plane->state; diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c index 03455b64a245..2a731722d840 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c @@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms) mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); } -static void update_irq_unlocked(struct mdp_kms *mdp_kms) +/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder + * link changes, this must be called to figure out the new global irqmask + */ +void mdp_irq_update(struct mdp_kms *mdp_kms) { unsigned long flags; spin_lock_irqsave(&list_lock, flags); @@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) spin_unlock_irqrestore(&list_lock, flags); if (needs_update) - update_irq_unlocked(mdp_kms); + mdp_irq_update(mdp_kms); } void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) @@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) spin_unlock_irqrestore(&list_lock, flags); if (needs_update) - update_irq_unlocked(mdp_kms); + mdp_irq_update(mdp_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 99557b5ad4fd..b268ce95d394 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); - +void mdp_irq_update(struct mdp_kms *mdp_kms); /* * pixel format helpers: diff --git a/drivers/gpu/drm/msm/msm_atomic.c
b/drivers/gpu/drm/msm/msm_atomic.c index f0de412e13dc..191968256c58 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -23,10 +23,41 @@ struct msm_commit { struct drm_atomic_state *state; uint32_t fence; struct msm_fence_cb fence_cb; + uint32_t crtc_mask; }; static void fence_cb(struct msm_fence_cb *cb); +/* block until specified crtcs are no longer pending update, and + * atomically mark them as pending update + */ +static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask) +{ + int ret; + + spin_lock(&priv->pending_crtcs_event.lock); + ret = wait_event_interruptible_locked(priv->pending_crtcs_event, + !(priv->pending_crtcs & crtc_mask)); + if (ret == 0) { + DBG("start: %08x", crtc_mask); + priv->pending_crtcs |= crtc_mask; + } + spin_unlock(&priv->pending_crtcs_event.lock); + + return ret; +} + +/* clear specified crtcs (no longer pending update) + */ +static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask) +{ + spin_lock(&priv->pending_crtcs_event.lock); + DBG("end: %08x", crtc_mask); + priv->pending_crtcs &= ~crtc_mask; + wake_up_all_locked(&priv->pending_crtcs_event); + spin_unlock(&priv->pending_crtcs_event.lock); +} + static struct msm_commit *new_commit(struct drm_atomic_state *state) { struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); @@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c) drm_atomic_helper_commit_post_planes(dev, state); + /* NOTE: _wait_for_vblanks() only waits for vblank on + * enabled CRTCs. So we end up faulting when disabling + * due to (potentially) unref'ing the outgoing fb's + * before the vblank when the disable has latched. + * + * But if it did wait on disabled (or newly disabled) + * CRTCs, that would be racy (ie. we could have missed + * the irq. We need some way to poll for pipe shut + * down. Or just live with occasionally hitting the + * timeout in the CRTC disable path (which really should + * not be critical path) + */ + drm_atomic_helper_wait_for_vblanks(dev, state); drm_atomic_helper_cleanup_planes(dev, state); drm_atomic_state_free(state); + end_atomic(dev->dev_private, c->crtc_mask); + kfree(c); } @@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb) int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) { - struct msm_commit *c; int nplanes = dev->mode_config.num_total_plane; + int ncrtcs = dev->mode_config.num_crtc; + struct msm_commit *c; int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); @@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev, return ret; c = new_commit(state); + if (!c) + return -ENOMEM; + + /* + * Figure out what crtcs we have: + */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + if (!crtc) + continue; + c->crtc_mask |= (1 << drm_crtc_index(crtc)); + } /* * Figure out what fence to wait for: @@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev, } /* + * Wait for pending updates on any of the same crtc's and then + * mark our set of crtc's as busy: + */ + ret = start_atomic(dev->dev_private, c->crtc_mask); + if (ret) + return ret; + + /* * This is the point of no return - everything below never fails except * when the hw goes bonghits. Which means we can commit the new state on * the software side now. 
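The msm_atomic changes above serialize overlapping async commits: each commit computes the mask of CRTCs it touches, blocks until none of them still has an update in flight, marks them busy, and clears them again when the commit completes. A condensed sketch of the same claim/release pattern around wait_event_interruptible_locked(), with hypothetical names standing in for msm_drm_private's pending_crtcs bookkeeping:

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/types.h>

struct crtc_tracker {
	u32 pending;			/* bitmask of CRTCs with a commit in flight */
	wait_queue_head_t event;	/* event.lock also guards 'pending' */
};

static void crtc_tracker_init(struct crtc_tracker *t)
{
	t->pending = 0;
	init_waitqueue_head(&t->event);
}

/* block until no CRTC in 'mask' is busy, then atomically claim them all */
static int crtc_tracker_claim(struct crtc_tracker *t, u32 mask)
{
	int ret;

	spin_lock(&t->event.lock);
	ret = wait_event_interruptible_locked(t->event, !(t->pending & mask));
	if (ret == 0)
		t->pending |= mask;
	spin_unlock(&t->event.lock);
	return ret;
}

/* release the claimed CRTCs and wake any commit waiting on them */
static void crtc_tracker_release(struct crtc_tracker *t, u32 mask)
{
	spin_lock(&t->event.lock);
	t->pending &= ~mask;
	wake_up_all_locked(&t->event);
	spin_unlock(&t->event.lock);
}

Claiming before the "point of no return" keeps the failure path simple: a signal received while waiting aborts the commit before any software state has been swapped in.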
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d3b791b7ddef..9a61546a0b05 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) priv->wq = alloc_ordered_workqueue("msm", 0); init_waitqueue_head(&priv->fence_event); + init_waitqueue_head(&priv->pending_crtcs_event); INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->fence_cbs); @@ -1012,7 +1013,6 @@ static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, .driver = { - .owner = THIS_MODULE, .name = "msm", .of_match_table = dt_match, .pm = &msm_pm_ops, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 136303818436..b69ef2d5a26c 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -96,6 +96,10 @@ struct msm_drm_private { /* callbacks deferred until bo is inactive: */ struct list_head fence_cbs; + /* crtcs pending async atomic updates: */ + uint32_t pending_crtcs; + wait_queue_head_t pending_crtcs_event; + /* registered MMUs: */ unsigned int num_mmus; struct msm_mmu *mmus[NUM_DOMAINS]; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 94d55e526b4e..1f3af13ccede 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -190,8 +190,7 @@ fail_unlock: fail: if (ret) { - if (fbi) - framebuffer_release(fbi); + framebuffer_release(fbi); if (fb) { drm_framebuffer_unregister_private(fb); drm_framebuffer_remove(fb); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 4a6f0e49d5b5..49dea4fb55ac 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) drm_free_large(msm_obj->pages); } else { - if (msm_obj->vaddr) - vunmap(msm_obj->vaddr); + vunmap(msm_obj->vaddr); put_pages(obj); } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 7fb4876388e7..06437745bc2c 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -65,27 +65,4 @@ static inline void msm_kms_init(struct msm_kms *kms, struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev); -/* TODO move these helper iterator macro somewhere common: */ -#define for_each_plane_on_crtc(_crtc, _plane) \ - list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \ - if ((_plane)->state->crtc == (_crtc)) - -static inline bool -__plane_will_be_attached_to_crtc(struct drm_atomic_state *state, - struct drm_plane *plane, struct drm_crtc *crtc) -{ - int idx = drm_plane_index(plane); - - /* if plane is modified in incoming state, use the new state: */ - if (state->plane_states[idx]) - return state->plane_states[idx]->crtc == crtc; - - /* otherwise, current state: */ - return plane->state->crtc == crtc; -} - -#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \ - list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \ - if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc))) - #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index cd05677ad4b7..72a40f95d048 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -218,7 +218,6 @@ nvc0_identify(struct 
nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; break; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index 5ae6a43893b5..1931057f9962 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c @@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (status & 0x40000000) { - nouveau_fifo_uevent(&priv->base); nv_wr32(priv, 0x002100, 0x40000000); + nouveau_fifo_uevent(&priv->base); status &= ~0x40000000; } } diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 1fe1f8fbda0c..074d434c3077 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) u32 inte = nv_rd32(priv, 0x002628); u32 unkn; + nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); + for (unkn = 0; unkn < 8; unkn++) { u32 ints = (intr >> (unkn * 0x04)) & inte; if (ints & 0x1) { @@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) nv_mask(priv, 0x002628, ints, 0); } } - - nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); } static void diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index fc9ef663f25a..6a8db7c80bd1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -982,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) } if (stat & 0x80000000) { - nve0_fifo_intr_engine(priv); nv_wr32(priv, 0x002100, 0x80000000); + nve0_fifo_intr_engine(priv); stat &= ~0x80000000; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c index a16024a74771..fde42e4d1b56 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c @@ -27,6 +27,20 @@ struct gk20a_fb_priv { }; static int +gk20a_fb_init(struct nouveau_object *object) +{ + struct gk20a_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ + return 0; +} + +static int gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) @@ -48,7 +62,7 @@ gk20a_fb_oclass = &(struct nouveau_fb_impl) { .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = gk20a_fb_ctor, .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, + .init = gk20a_fb_init, .fini = _nouveau_fb_fini, }, .memtype = nvc0_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 5d93902a91ab..f8042433752b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, if (ret) return ret; - bo->gem.dumb = true; ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); 
drm_gem_object_unreference_unlocked(&bo->gem); return ret; @@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv, gem = drm_gem_object_lookup(dev, file_priv, handle); if (gem) { struct nouveau_bo *bo = nouveau_gem_object(gem); - - /* - * We don't allow dumb mmaps on objects created using another - * interface. - */ - WARN_ONCE(!(gem->dumb || gem->import_attach), - "Illegal dumb map of accelerated buffer.\n"); - *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); drm_gem_object_unreference_unlocked(gem); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index afb93bb72f97..65910e3aed0c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -664,7 +664,6 @@ nouveau_pmops_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); - pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } @@ -732,6 +731,7 @@ nouveau_pmops_runtime_suspend(struct device *dev) ret = nouveau_do_suspend(drm_dev, true); pci_save_state(pdev); pci_disable_device(pdev); + pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3cold); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 515cd9aebb99..f32a434724e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence) return container_of(fence->base.lock, struct nouveau_fence_chan, lock); } -static void +static int nouveau_fence_signal(struct nouveau_fence *fence) { + int drop = 0; + fence_signal_locked(&fence->base); list_del(&fence->head); + rcu_assign_pointer(fence->channel, NULL); if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { struct nouveau_fence_chan *fctx = nouveau_fctx(fence); if (!--fctx->notify_ref) - nvif_notify_put(&fctx->notify); + drop = 1; } fence_put(&fence->base); + return drop; } static struct nouveau_fence * @@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx) { struct nouveau_fence *fence; - nvif_notify_fini(&fctx->notify); - spin_lock_irq(&fctx->lock); while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); - nouveau_fence_signal(fence); - fence->channel = NULL; + if (nouveau_fence_signal(fence)) + nvif_notify_put(&fctx->notify); } spin_unlock_irq(&fctx->lock); + + nvif_notify_fini(&fctx->notify); + fctx->dead = 1; + + /* + * Ensure that all accesses to fence->channel complete before freeing + * the channel. 
+ */ + synchronize_rcu(); } static void @@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx) kref_put(&fctx->fence_ref, nouveau_fence_context_put); } -static void +static int nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) { struct nouveau_fence *fence; - + int drop = 0; u32 seq = fctx->read(chan); while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); if ((int)(seq - fence->base.seqno) < 0) - return; + break; - nouveau_fence_signal(fence); + drop |= nouveau_fence_signal(fence); } + + return drop; } static int @@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) struct nouveau_fence_chan *fctx = container_of(notify, typeof(*fctx), notify); unsigned long flags; + int ret = NVIF_NOTIFY_KEEP; spin_lock_irqsave(&fctx->lock, flags); if (!list_empty(&fctx->pending)) { struct nouveau_fence *fence; + struct nouveau_channel *chan; fence = list_entry(fctx->pending.next, typeof(*fence), head); - nouveau_fence_update(fence->channel, fctx); + chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); + if (nouveau_fence_update(fence->channel, fctx)) + ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags); - /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */ - return NVIF_NOTIFY_KEEP; + return ret; } void @@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) if (!ret) { fence_get(&fence->base); spin_lock_irq(&fctx->lock); - nouveau_fence_update(chan, fctx); + + if (nouveau_fence_update(chan, fctx)) + nvif_notify_put(&fctx->notify); + list_add_tail(&fence->head, &fctx->pending); spin_unlock_irq(&fctx->lock); } @@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence) if (fence->base.ops == &nouveau_fence_ops_legacy || fence->base.ops == &nouveau_fence_ops_uevent) { struct nouveau_fence_chan *fctx = nouveau_fctx(fence); + struct nouveau_channel *chan; unsigned long flags; if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return true; spin_lock_irqsave(&fctx->lock, flags); - nouveau_fence_update(fence->channel, fctx); + chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); + if (chan && nouveau_fence_update(chan, fctx)) + nvif_notify_put(&fctx->notify); spin_unlock_irqrestore(&fctx->lock, flags); } return fence_is_signaled(&fence->base); @@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e if (fence && (!exclusive || !fobj || !fobj->shared_count)) { struct nouveau_channel *prev = NULL; + bool must_wait = true; f = nouveau_local_fence(fence, chan->drm); - if (f) - prev = f->channel; + if (f) { + rcu_read_lock(); + prev = rcu_dereference(f->channel); + if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) + must_wait = false; + rcu_read_unlock(); + } - if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) + if (must_wait) ret = fence_wait(fence, intr); return ret; @@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e for (i = 0; i < fobj->shared_count && !ret; ++i) { struct nouveau_channel *prev = NULL; + bool must_wait = true; fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(resv)); f = nouveau_local_fence(fence, chan->drm); - if (f) - prev = f->channel; + if (f) { + rcu_read_lock(); + prev = rcu_dereference(f->channel); + if (prev && (prev == chan || fctx->sync(f, prev, 
chan) == 0)) + must_wait = false; + rcu_read_unlock(); + } - if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) + if (must_wait) ret = fence_wait(fence, intr); - - if (ret) - break; } return ret; @@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); - return fence->channel ? fctx->name : "dead channel"; + return !fctx->dead ? fctx->name : "dead channel"; } /* @@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); - struct nouveau_channel *chan = fence->channel; + struct nouveau_channel *chan; + bool ret = false; + + rcu_read_lock(); + chan = rcu_dereference(fence->channel); + if (chan) + ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0; + rcu_read_unlock(); - return (int)(fctx->read(chan) - fence->base.seqno) >= 0; + return ret; } static bool nouveau_fence_no_signaling(struct fence *f) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 943b0b17b1fc..96e461c6f68f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -14,7 +14,7 @@ struct nouveau_fence { bool sysmem; - struct nouveau_channel *channel; + struct nouveau_channel __rcu *channel; unsigned long timeout; }; @@ -47,7 +47,7 @@ struct nouveau_fence_chan { char name[32]; struct nvif_notify notify; - int notify_ref; + int notify_ref, dead; }; struct nouveau_fence_priv { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 5922d2ef4599..bf0f9e21d714 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -469,9 +469,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - WARN_ONCE(nvbo->gem.dumb, - "GPU use of dumb buffer is illegal.\n"); - ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, b->write_domains, b->valid_domains); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 753a6def61e7..3d1cfcb96b6b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -28,6 +28,7 @@ #include "nouveau_ttm.h" #include "nouveau_gem.h" +#include "drm_legacy.h" static int nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { @@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return -EINVAL; + return drm_legacy_mmap(filp, vma); return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 0ea3a88a0dca..490b90866baf 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -821,6 +821,22 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) } static int +nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push; + + push = evo_wait(mast, 8); + if (!push) + return -ENOMEM; + + evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1); + evo_data(push, usec); + evo_kick(push, mast); + return 0; 
+} + +static int nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) { struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); @@ -1136,14 +1152,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x00800000 | mode->clock); evo_data(push, (ilace == 2) ? 2 : 0); - evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8); + evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); evo_data(push, 0x00000000); evo_data(push, (vactive << 16) | hactive); evo_data(push, ( vsynce << 16) | hsynce); evo_data(push, (vblanke << 16) | hblanke); evo_data(push, (vblanks << 16) | hblanks); evo_data(push, (vblan2e << 16) | vblan2s); - evo_data(push, vblankus); + evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x00000311); @@ -1173,6 +1189,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, nv_connector = nouveau_crtc_connector_get(nv_crtc); nv50_crtc_set_dither(nv_crtc, false); nv50_crtc_set_scale(nv_crtc, false); + + /* G94 only accepts this after setting scale */ + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) + nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus); + nv50_crtc_set_color_vibrance(nv_crtc, false); nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false); return 0; @@ -1678,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) drm_edid_to_eld(&nv_connector->base, nv_connector->edid); memcpy(args.data, nv_connector->base.eld, sizeof(args.data)); - nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4); + nvif_mthd(disp->disp, 0, &args, + sizeof(args.base) + drm_eld_size(args.data)); } static void diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 862ba03c236c..8241ed9b353c 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -718,7 +718,6 @@ static const struct dev_pm_ops omapdrm_pm_ops = { static struct platform_driver pdev = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &omapdrm_pm_ops, #endif diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index c4b6167a8bf3..e95385bf8356 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -807,7 +807,6 @@ static void panel_simple_platform_shutdown(struct platform_device *pdev) static struct platform_driver panel_simple_platform_driver = { .driver = { .name = "panel-simple", - .owner = THIS_MODULE, .of_match_table = platform_of_match, }, .probe = panel_simple_platform_probe, diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 446e71ca36cb..d9b25684ac98 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) if (list_is_singular(&release->bos)) return 0; - ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr); + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, + !no_intr, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 15da7ef344a4..ec1593a6a561 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1217,7 +1217,7 @@ free: return ret; } -int atom_execute_table(struct atom_context *ctx, int 
index, uint32_t * params) +int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) { int r; @@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) return r; } +int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +{ + int r; + mutex_lock(&ctx->scratch_mutex); + r = atom_execute_table_scratch_unlocked(ctx, index, params); + mutex_unlock(&ctx->scratch_mutex); + return r; +} + static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; static void atom_index_iio(struct atom_context *ctx, int base) diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index feba6b8d36b3..6d014ddb6b78 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -125,6 +125,7 @@ struct card_info { struct atom_context { struct card_info *card; struct mutex mutex; + struct mutex scratch_mutex; void *bios; uint32_t cmd_table, data_table; uint16_t *iio; @@ -145,6 +146,7 @@ extern int atom_debug; struct atom_context *atom_parse(struct card_info *, void *); int atom_execute_table(struct atom_context *, int, uint32_t *); +int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *); int atom_asic_init(struct atom_context *); void atom_destroy(struct atom_context *); bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 30d242b25078..d59ec491dbb9 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, atombios_crtc_set_base(crtc, x, y, old_fb); atombios_overscan_setup(crtc, mode, adjusted_mode); atombios_scaler_setup(crtc); + radeon_cursor_reset(crtc); /* update the hw version fpr dpm */ radeon_crtc->hw_mode = *adjusted_mode; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 95d5d4ab3335..11ba9d21b89b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, memset(&args, 0, sizeof(args)); mutex_lock(&chan->mutex); + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); @@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, if (ASIC_IS_DCE4(rdev)) args.v2.ucHPD_ID = chan->rec.hpd; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); *ack = args.v1.ucReplyStatus; @@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, r = recv_bytes; done: + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); mutex_unlock(&chan->mutex); return r; diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 9c570fb15b8c..4157780585a0 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -48,6 +48,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, memset(&args, 0, sizeof(args)); mutex_lock(&chan->mutex); + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); base = (unsigned char *)rdev->mode_info.atom_context->scratch; @@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, args.ucSlaveAddr = 
slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { @@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, radeon_atom_copy_swap(buf, base, num, false); done: + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); mutex_unlock(&chan->mutex); return r; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 3f898d020ae6..f373a81ba3d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -937,7 +937,7 @@ static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) tmp |= TMIN(0); WREG32_SMC(CG_FDO_CTRL2, tmp); - tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; + tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; tmp |= FDO_PWM_MODE(mode); WREG32_SMC(CG_FDO_CTRL2, tmp); } @@ -1162,7 +1162,7 @@ static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, tmp |= TARGET_PERIOD(tach_period); WREG32_SMC(CG_TACH_CTRL, tmp); - ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); + ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); return 0; } @@ -1178,7 +1178,7 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev) tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); WREG32_SMC(CG_FDO_CTRL2, tmp); - tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK; + tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; tmp |= TMIN(pi->t_min); WREG32_SMC(CG_FDO_CTRL2, tmp); pi->fan_ctrl_is_in_default_mode = true; @@ -5849,7 +5849,6 @@ int ci_dpm_init(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; pi->fan_ctrl_is_in_default_mode = true; - rdev->pm.dpm.fan.ucode_fan_control = false; return 0; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 3deeed33322f..6dcde3798b45 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -4333,8 +4333,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) /* init the CE partitions. 
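/*
 * [Editor's note -- an aside, not part of the cik.c hunk it lands in]
 * The ci_dpm.c fixes just above correct a classic read-modify-write bug:
 * to update one field of a register you must clear it with the
 * complement of its mask before ORing in the new value. The buggy form
 * (tmp = val & MASK) kept the stale field and wiped every other bit.
 * Minimal illustration; rmw_field is a made-up helper:
 */
#include <linux/types.h>

static inline u32 rmw_field(u32 old, u32 mask, u32 field_val)
{
	/* clear the field with ~mask, preserve the rest, install new value */
	return (old & ~mask) | (field_val & mask);
}
/*
 * Buggy:  tmp = old &  FDO_PWM_MODE_MASK;  tmp |= FDO_PWM_MODE(mode);
 * Fixed:  tmp = old & ~FDO_PWM_MODE_MASK;  tmp |= FDO_PWM_MODE(mode);
 */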
CE only used for gfx on CIK */ radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); - radeon_ring_write(ring, 0xc000); - radeon_ring_write(ring, 0xc000); + radeon_ring_write(ring, 0x8000); + radeon_ring_write(ring, 0x8000); /* setup clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); @@ -9417,6 +9417,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 54b98379188d..dde5c7e29eb2 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -663,17 +663,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -682,8 +685,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) } ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; @@ -700,7 +703,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index e4e88ca8b82e..ba85986febea 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -213,18 +213,18 @@ #define CG_FDO_CTRL0 0xC0300064 #define FDO_STATIC_DUTY(x) ((x) << 0) -#define FDO_STATIC_DUTY_MASK 0x0000000F +#define FDO_STATIC_DUTY_MASK 0x000000FF #define FDO_STATIC_DUTY_SHIFT 0 #define CG_FDO_CTRL1 0xC0300068 #define FMAX_DUTY100(x) ((x) << 0) -#define FMAX_DUTY100_MASK 0x0000000F +#define FMAX_DUTY100_MASK 0x000000FF #define FMAX_DUTY100_SHIFT 0 #define CG_FDO_CTRL2 0xC030006C #define TMIN(x) ((x) << 0) -#define TMIN_MASK 0x0000000F +#define TMIN_MASK 0x000000FF #define TMIN_SHIFT 0 #define FDO_PWM_MODE(x) ((x) << 11) -#define FDO_PWM_MODE_MASK (3 << 11) +#define FDO_PWM_MODE_MASK (7 << 11) #define FDO_PWM_MODE_SHIFT 11 #define TACH_PWM_RESP_RATE(x) ((x) << 25) #define TACH_PWM_RESP_RATE_MASK (0x7f << 25) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f37d39d2bbbc..85995b4e3338 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ 
-2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 5c8b358f9fba..924b1b7ab455 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -35,7 +35,7 @@ #define MIN(a,b) (((a)<(b))?(a):(b)) int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, - struct radeon_cs_reloc **cs_reloc); + struct radeon_bo_list **cs_reloc); struct evergreen_cs_track { u32 group_size; u32 nbanks; @@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; u32 last_reg; u32 m, i, tmp, *ib; int r; @@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) static int evergreen_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct evergreen_cs_track *track; volatile u32 *ib; unsigned idx; @@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) p->track = NULL; return r; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); @@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) **/ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) { - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; - struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc; + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; + struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; u32 header, cmd, count, sub_cmd; volatile u32 *ib = p->ib.ptr; u32 idx; @@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib->length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 4a85bb644e24..b928c17bdeed 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -347,7 +347,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) @@ -356,7 +356,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. 
+ * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -406,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ @@ -417,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member); \ @@ -428,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ @@ -439,7 +439,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -453,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. @@ -467,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ @@ -480,7 +480,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member), \ @@ -493,7 +493,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. 
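/*
 * [Editor's note, separate from the mkregtable.c doc fixes around this
 * point] The s/list_struct/list_head/ corrections matter because
 * @member must name the embedded struct list_head: list_entry() is
 * container_of(), recovering the enclosing object from that member.
 * Minimal userspace illustration (struct item and demo are invented):
 */
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int value;
	struct list_head node;	/* @member here is "node", a list_head */
};

static int demo(struct list_head *pos)
{
	/* map the list_head pointer back to its containing struct item */
	struct item *it = list_entry(pos, struct item, node);

	return it->value;
}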
+ * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. @@ -509,7 +509,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. @@ -524,7 +524,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 10f8be0ee173..74f06d540591 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, int r; u32 tile_flags = 0; u32 tmp; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; u32 value; r = radeon_cs_packet_next_reloc(p, &reloc, 0); @@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, int idx) { unsigned c, i; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r100_cs_track *track; int r = 0; volatile uint32_t *ib; @@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r100_cs_track *track; volatile uint32_t *ib; uint32_t tmp; @@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, static int r100_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r100_cs_track *track; unsigned idx; volatile uint32_t *ib; @@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p) } if (r) return r; - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); return 0; } @@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) { diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 732d4938aab7..c70e6d5bcd19 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r100_cs_track *track; volatile uint32_t *ib; uint32_t tmp; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1bc4704034ce..064ad5569cca 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list 
*reloc; struct r100_cs_track *track; volatile uint32_t *ib; uint32_t tmp, tile_flags = 0; @@ -1142,7 +1142,7 @@ fail: static int r300_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r100_cs_track *track; volatile uint32_t *ib; unsigned idx; @@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p) if (r) { return r; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); return 0; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index c47537a1ddba..acc1f99c84d9 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p, static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { struct r600_cs_track *track = (struct r600_cs_track *)p->track; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; u32 m, i, tmp, *ib; int r; @@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) static int r600_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; struct r600_cs_track *track; volatile u32 *ib; unsigned idx; @@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p) p->track = NULL; return r; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); @@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) { - if (p->chunk_relocs_idx == -1) { + if (p->chunk_relocs == NULL) { return 0; } - p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); + p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL); if (p->relocs == NULL) { return -ENOMEM; } @@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, /* Copy the packet into the IB, the parser will read from the * input memory (cached) and write to the IB (which can be * uncached). */ - ib_chunk = &parser.chunks[parser.chunk_ib_idx]; + ib_chunk = parser.chunk_ib; parser.ib.length_dw = ib_chunk->length_dw; *l = parser.ib.length_dw; if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { @@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void) * GPU offset using the provided start. 
**/ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, - struct radeon_cs_reloc **cs_reloc) + struct radeon_bo_list **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; unsigned idx; *cs_reloc = NULL; - if (p->chunk_relocs_idx == -1) { + if (p->chunk_relocs == NULL) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + relocs_chunk = p->chunk_relocs; idx = p->dma_reloc_idx; if (idx >= p->nrelocs) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, p->nrelocs); return -EINVAL; } - *cs_reloc = p->relocs_ptr[idx]; + *cs_reloc = &p->relocs[idx]; p->dma_reloc_idx++; return 0; } @@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, **/ int r600_dma_cs_parse(struct radeon_cs_parser *p) { - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; - struct radeon_cs_reloc *src_reloc, *dst_reloc; + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; + struct radeon_bo_list *src_reloc, *dst_reloc; u32 header, cmd, count, tiled; volatile u32 *ib = p->ib.ptr; u32 idx, idx_value; @@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib->length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 3a58b8073f49..d2dd29ab24fa 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; - tmp = 0xCAFEDEAD; - writel(tmp, ptr); + gpu_addr = rdev->wb.gpu_addr + index; r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) } ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; @@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 76c6a17eeb2d..843b65f46ece 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1265,7 +1265,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); pt = &ppt->power_tune_table; } else { ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) diff --git 
a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3207bb60715e..54529b837afa 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -450,6 +450,15 @@ struct radeon_mman { #endif }; +struct radeon_bo_list { + struct radeon_bo *robj; + struct ttm_validate_buffer tv; + uint64_t gpu_offset; + unsigned prefered_domains; + unsigned allowed_domains; + uint32_t tiling_flags; +}; + /* bo virtual address in a specific vm */ struct radeon_bo_va { /* protected by bo being reserved */ @@ -920,6 +929,9 @@ struct radeon_vm { struct rb_root va; + /* protecting invalidated and freed */ + spinlock_t status_lock; + /* BOs moved, but not yet updated in the PT */ struct list_head invalidated; @@ -1044,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev); /* * CS. */ -struct radeon_cs_reloc { - struct drm_gem_object *gobj; - struct radeon_bo *robj; - struct ttm_validate_buffer tv; - uint64_t gpu_offset; - unsigned prefered_domains; - unsigned allowed_domains; - uint32_t tiling_flags; - uint32_t handle; -}; - struct radeon_cs_chunk { - uint32_t chunk_id; uint32_t length_dw; uint32_t *kdata; void __user *user_ptr; @@ -1074,16 +1074,15 @@ struct radeon_cs_parser { unsigned idx; /* relocations */ unsigned nrelocs; - struct radeon_cs_reloc *relocs; - struct radeon_cs_reloc **relocs_ptr; - struct radeon_cs_reloc *vm_bos; + struct radeon_bo_list *relocs; + struct radeon_bo_list *vm_bos; struct list_head validated; unsigned dma_reloc_idx; /* indices of various chunks */ - int chunk_ib_idx; - int chunk_relocs_idx; - int chunk_flags_idx; - int chunk_const_ib_idx; + struct radeon_cs_chunk *chunk_ib; + struct radeon_cs_chunk *chunk_relocs; + struct radeon_cs_chunk *chunk_flags; + struct radeon_cs_chunk *chunk_const_ib; struct radeon_ib ib; struct radeon_ib const_ib; void *track; @@ -1097,7 +1096,7 @@ struct radeon_cs_parser { static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) { - struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; + struct radeon_cs_chunk *ibc = p->chunk_ib; if (ibc->kdata) return ibc->kdata[idx]; @@ -2975,7 +2974,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev); int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); -struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, +struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_vm *vm, struct list_head *head); struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, @@ -3089,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p); void radeon_cs_dump_packet(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt); int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, - struct radeon_cs_reloc **cs_reloc, + struct radeon_bo_list **cs_reloc, int nomm); int r600_cs_common_vline_parse(struct radeon_cs_parser *p, uint32_t *vline_start_end, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 300c4b3d4669..26baa9c05f6c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector) } if (!radeon_connector->edid) { + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. 
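/*
 * [Editor's sketch -- an aside on the radeon.h hunk above] The new
 * status_lock spinlock guards only the VM's "invalidated" and "freed"
 * lists; the usual pattern holds it just around the O(1) list
 * manipulation, never around page-table updates. Names below are
 * hypothetical stand-ins:
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct vm {
	spinlock_t status_lock;		/* protects invalidated */
	struct list_head invalidated;	/* BOs moved, PTs not yet updated */
};

static void vm_init(struct vm *vm)
{
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
}

static void vm_mark_invalidated(struct vm *vm, struct list_head *entry)
{
	spin_lock(&vm->status_lock);
	list_move(entry, &vm->invalidated);	/* cheap, O(1) */
	spin_unlock(&vm->status_lock);
}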
+ */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + return; + if (rdev->is_atom_bios) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || @@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = radeon_best_single_encoder(connector); enum drm_connector_status ret = connector_status_disconnected; @@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; - + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* check for edid as well */ @@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 75f22e5e999f..c830863bc98a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) struct drm_device *ddev = p->rdev->ddev; struct radeon_cs_chunk *chunk; struct radeon_cs_buckets buckets; - unsigned i, j; - bool duplicate, need_mmap_lock = false; + unsigned i; + bool need_mmap_lock = false; int r; - if (p->chunk_relocs_idx == -1) { + if (p->chunk_relocs == NULL) { return 0; } - chunk = &p->chunks[p->chunk_relocs_idx]; + chunk = p->chunk_relocs; p->dma_reloc_idx = 0; /* FIXME: we assume that each relocs use 4 dwords */ p->nrelocs = chunk->length_dw / 4; - p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); - if (p->relocs_ptr == NULL) { - return -ENOMEM; - } - p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL); + p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); if (p->relocs == NULL) { return -ENOMEM; } @@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) for (i = 0; i < p->nrelocs; i++) { struct drm_radeon_cs_reloc *r; + struct drm_gem_object *gobj; unsigned priority; - duplicate = false; r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; - for (j = 0; j < i; j++) { - if (r->handle == p->relocs[j].handle) { - p->relocs_ptr[i] = &p->relocs[j]; - duplicate = true; - break; - } - } - if (duplicate) { - p->relocs[i].handle = 0; - continue; - } - - p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp, - r->handle); - if (p->relocs[i].gobj == NULL) { + gobj = drm_gem_object_lookup(ddev, p->filp, r->handle); + if (gobj == NULL) { DRM_ERROR("gem object lookup failed 0x%x\n", r->handle); return -ENOENT; } - p->relocs_ptr[i] = 
&p->relocs[i]; - p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); + p->relocs[i].robj = gem_to_radeon_bo(gobj); /* The userspace buffer priorities are from 0 to 15. A higher * number means the buffer is more important. @@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].tv.shared = !r->write_domain; - p->relocs[i].handle = r->handle; radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, priority); @@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority static int radeon_cs_sync_rings(struct radeon_cs_parser *p) { - int i, r = 0; + struct radeon_bo_list *reloc; + int r; - for (i = 0; i < p->nrelocs; i++) { + list_for_each_entry(reloc, &p->validated, tv.head) { struct reservation_object *resv; - if (!p->relocs[i].robj) - continue; - - resv = p->relocs[i].robj->tbo.resv; + resv = reloc->robj->tbo.resv; r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, - p->relocs[i].tv.shared); - + reloc->tv.shared); if (r) - break; + return r; } - return r; + return 0; } /* XXX: note that this is called from the legacy UMS CS ioctl as well */ @@ -286,10 +264,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->idx = 0; p->ib.sa_bo = NULL; p->const_ib.sa_bo = NULL; - p->chunk_ib_idx = -1; - p->chunk_relocs_idx = -1; - p->chunk_flags_idx = -1; - p->chunk_const_ib_idx = -1; + p->chunk_ib = NULL; + p->chunk_relocs = NULL; + p->chunk_flags = NULL; + p->chunk_const_ib = NULL; p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); if (p->chunks_array == NULL) { return -ENOMEM; @@ -316,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EFAULT; } p->chunks[i].length_dw = user_chunk.length_dw; - p->chunks[i].chunk_id = user_chunk.chunk_id; - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { - p->chunk_relocs_idx = i; + if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) { + p->chunk_relocs = &p->chunks[i]; } - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { - p->chunk_ib_idx = i; + if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { + p->chunk_ib = &p->chunks[i]; /* zero length IB isn't useful */ if (p->chunks[i].length_dw == 0) return -EINVAL; } - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { - p->chunk_const_ib_idx = i; + if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) { + p->chunk_const_ib = &p->chunks[i]; /* zero length CONST IB isn't useful */ if (p->chunks[i].length_dw == 0) return -EINVAL; } - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { - p->chunk_flags_idx = i; + if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) { + p->chunk_flags = &p->chunks[i]; /* zero length flags aren't useful */ if (p->chunks[i].length_dw == 0) return -EINVAL; @@ -342,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) size = p->chunks[i].length_dw; cdata = (void __user *)(unsigned long)user_chunk.chunk_data; p->chunks[i].user_ptr = cdata; - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) + if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) continue; - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { + if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) continue; } @@ -358,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) if (copy_from_user(p->chunks[i].kdata, cdata, size)) { return -EFAULT; } - if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { + if (user_chunk.chunk_id == 
RADEON_CHUNK_ID_FLAGS) { p->cs_flags = p->chunks[i].kdata[0]; if (p->chunks[i].length_dw > 1) ring = p->chunks[i].kdata[1]; @@ -399,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) static int cmp_size_smaller_first(void *priv, struct list_head *a, struct list_head *b) { - struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); - struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); + struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head); + struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); /* Sort A before B if A is smaller. */ return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; @@ -441,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { - if (parser->relocs[i].gobj) - drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); + struct radeon_bo *bo = parser->relocs[i].robj; + if (bo == NULL) + continue; + + drm_gem_object_unreference_unlocked(&bo->gem_base); } } kfree(parser->track); kfree(parser->relocs); - kfree(parser->relocs_ptr); drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); @@ -462,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, { int r; - if (parser->chunk_ib_idx == -1) + if (parser->chunk_ib == NULL) return 0; if (parser->cs_flags & RADEON_CS_USE_VM) @@ -505,9 +484,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, if (r) return r; - radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv, - true); - r = radeon_vm_clear_freed(rdev, vm); if (r) return r; @@ -525,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, for (i = 0; i < p->nrelocs; i++) { struct radeon_bo *bo; - /* ignore duplicates */ - if (p->relocs_ptr[i] != &p->relocs[i]) - continue; - bo = p->relocs[i].robj; bo_va = radeon_vm_bo_find(vm, bo); if (bo_va == NULL) { @@ -553,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, struct radeon_vm *vm = &fpriv->vm; int r; - if (parser->chunk_ib_idx == -1) + if (parser->chunk_ib == NULL) return 0; if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) return 0; @@ -587,7 +559,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, } if ((rdev->family >= CHIP_TAHITI) && - (parser->chunk_const_ib_idx != -1)) { + (parser->chunk_const_ib != NULL)) { r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); } else { r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); @@ -614,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser struct radeon_vm *vm = NULL; int r; - if (parser->chunk_ib_idx == -1) + if (parser->chunk_ib == NULL) return 0; if (parser->cs_flags & RADEON_CS_USE_VM) { @@ -622,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser vm = &fpriv->vm; if ((rdev->family >= CHIP_TAHITI) && - (parser->chunk_const_ib_idx != -1)) { - ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; + (parser->chunk_const_ib != NULL)) { + ib_chunk = parser->chunk_const_ib; if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw); return -EINVAL; @@ -642,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser return -EFAULT; } - ib_chunk = &parser->chunks[parser->chunk_ib_idx]; + ib_chunk = parser->chunk_ib; 
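/*
 * [Editor's sketch] The radeon_cs changes above replace the old
 * chunk_*_idx integers (-1 meaning "absent") with direct
 * struct radeon_cs_chunk pointers (NULL meaning "absent"), dropping one
 * array indirection on every access. The shape of the refactor, with
 * invented names:
 */
struct chunk { unsigned length_dw; };

struct parser {
	struct chunk *chunks;	/* array, unchanged */
	/* old: int chunk_ib_idx;   (-1 if missing) */
	struct chunk *chunk_ib;	/* new: NULL if missing */
};

static int parse(struct parser *p)
{
	if (p->chunk_ib == NULL)	/* was: p->chunk_ib_idx == -1 */
		return 0;

	/* was: p->chunks[p->chunk_ib_idx].length_dw */
	return (int)p->chunk_ib->length_dw;
}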
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw); return -EINVAL; } } - ib_chunk = &parser->chunks[parser->chunk_ib_idx]; + ib_chunk = parser->chunk_ib; r = radeon_ib_get(rdev, parser->ring, &parser->ib, vm, ib_chunk->length_dw * 4); @@ -740,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx) { - struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; + struct radeon_cs_chunk *ib_chunk = p->chunk_ib; struct radeon_device *rdev = p->rdev; uint32_t header; @@ -834,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p, * GPU offset using the provided start. **/ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, - struct radeon_cs_reloc **cs_reloc, + struct radeon_bo_list **cs_reloc, int nomm) { struct radeon_cs_chunk *relocs_chunk; @@ -842,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, unsigned idx; int r; - if (p->chunk_relocs_idx == -1) { + if (p->chunk_relocs == NULL) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } *cs_reloc = NULL; - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + relocs_chunk = p->chunk_relocs; r = radeon_cs_packet_parse(p, &p3reloc, p->idx); if (r) return r; @@ -873,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, (u64)relocs_chunk->kdata[idx + 3] << 32; (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; } else - *cs_reloc = p->relocs_ptr[(idx / 4)]; + *cs_reloc = &p->relocs[(idx / 4)]; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 85f38ee11888..45e54060ee97 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -227,11 +227,24 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, return ret; } -static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, - uint64_t gpu_addr, int hot_x, int hot_y) +static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; + struct radeon_bo *robj = gem_to_radeon_bo(obj); + uint64_t gpu_addr; + int ret; + + ret = radeon_bo_reserve(robj, false); + if (unlikely(ret != 0)) + goto fail; + /* Only 27 bit offset for legacy cursor */ + ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, + ASIC_IS_AVIVO(rdev) ? 
0 : 1 << 27, + &gpu_addr); + radeon_bo_unreserve(robj); + if (ret) + goto fail; if (ASIC_IS_DCE4(rdev)) { WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, @@ -253,18 +266,12 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); } - if (hot_x != radeon_crtc->cursor_hot_x || - hot_y != radeon_crtc->cursor_hot_y) { - int x, y; - - x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x; - y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y; + return 0; - radeon_cursor_move_locked(crtc, x, y); +fail: + drm_gem_object_unreference_unlocked(obj); - radeon_crtc->cursor_hot_x = hot_x; - radeon_crtc->cursor_hot_y = hot_y; - } + return ret; } int radeon_crtc_cursor_set2(struct drm_crtc *crtc, @@ -276,10 +283,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct radeon_device *rdev = crtc->dev->dev_private; struct drm_gem_object *obj; - struct radeon_bo *robj; - uint64_t gpu_addr; int ret; if (!handle) { @@ -301,41 +305,76 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, return -ENOENT; } - robj = gem_to_radeon_bo(obj); - ret = radeon_bo_reserve(robj, false); - if (unlikely(ret != 0)) - goto fail; - /* Only 27 bit offset for legacy cursor */ - ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, - ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, - &gpu_addr); - radeon_bo_unreserve(robj); - if (ret) - goto fail; - radeon_crtc->cursor_width = width; radeon_crtc->cursor_height = height; radeon_lock_cursor(crtc, true); - radeon_set_cursor(crtc, obj, gpu_addr, hot_x, hot_y); - radeon_show_cursor(crtc); + + if (hot_x != radeon_crtc->cursor_hot_x || + hot_y != radeon_crtc->cursor_hot_y) { + int x, y; + + x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x; + y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y; + + radeon_cursor_move_locked(crtc, x, y); + + radeon_crtc->cursor_hot_x = hot_x; + radeon_crtc->cursor_hot_y = hot_y; + } + + ret = radeon_set_cursor(crtc, obj); + + if (ret) + DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n", + ret); + else + radeon_show_cursor(crtc); + radeon_lock_cursor(crtc, false); unpin: if (radeon_crtc->cursor_bo) { - robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); + struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); ret = radeon_bo_reserve(robj, false); if (likely(ret == 0)) { radeon_bo_unpin(robj); radeon_bo_unreserve(robj); } - drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); + if (radeon_crtc->cursor_bo != obj) + drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; return 0; -fail: - drm_gem_object_unreference_unlocked(obj); +} - return ret; +/** + * radeon_cursor_reset - Re-set the current cursor, if any. + * + * @crtc: drm crtc + * + * If the CRTC passed in currently has a cursor assigned, this function + * makes sure it's visible. 
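/*
 * [Editor's sketch -- an aside on the cursor rework above] Pinning now
 * lives inside radeon_set_cursor(): reserve the BO (lock it), pin it
 * into VRAM -- pre-AVIVO parts decode only 27 offset bits for the
 * cursor, hence the 1 << 27 placement limit -- then unreserve. The pin,
 * not the reservation, is what keeps the buffer resident afterwards.
 * Generic shape with invented helper names:
 */
#include <linux/types.h>

struct bo;

static int bo_reserve(struct bo *bo);			/* assumed helper */
static void bo_unreserve(struct bo *bo);		/* assumed helper */
static int bo_pin_max_offset(struct bo *bo, u64 max_offset, u64 *gpu_addr);

static int pin_for_scanout(struct bo *bo, u64 max_offset, u64 *gpu_addr)
{
	int r = bo_reserve(bo);		/* may sleep */

	if (r)
		return r;
	r = bo_pin_max_offset(bo, max_offset, gpu_addr);
	bo_unreserve(bo);		/* the pin outlives the reservation */
	return r;
}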
+ */ +void radeon_cursor_reset(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + int ret; + + if (radeon_crtc->cursor_bo) { + radeon_lock_cursor(crtc, true); + + radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x, + radeon_crtc->cursor_y); + + ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo); + if (ret) + DRM_ERROR("radeon_set_cursor returned %d, not showing " + "cursor\n", ret); + else + radeon_show_cursor(crtc); + + radeon_lock_cursor(crtc, false); + } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ae87310fd96e..0ec65168f331 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -983,6 +983,7 @@ int radeon_atombios_init(struct radeon_device *rdev) } mutex_init(&rdev->mode_info.atom_context->mutex); + mutex_init(&rdev->mode_info.atom_context->scratch_mutex); radeon_atom_initialize_bios_scratch_regs(rdev->ddev); atom_allocate_fb_scratch(rdev->mode_info.atom_context); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 9a19e52cc655..6b670b0bc47b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; + /* disable native backlight control on older asics */ + else if (rdev->family < CHIP_R600) + use_bl = false; else use_bl = true; } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 0ea1db83d573..29b9220ec399 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,10 +48,40 @@ struct radeon_fbdev { struct radeon_device *rdev; }; +/** + * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. + * + * @info: fbdev info + * + * This function hides the cursor on all CRTCs used by fbdev. 
+ */ +static int radeon_fb_helper_set_par(struct fb_info *info) +{ + int ret; + + ret = drm_fb_helper_set_par(info); + + /* XXX: with universal plane support fbdev will automatically disable + * all non-primary planes (including the cursor) + */ + if (ret == 0) { + struct drm_fb_helper *fb_helper = info->par; + int i; + + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; + + radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + } + } + + return ret; +} + static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, + .fb_set_par = radeon_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 12cfaeac1205..a46f73737994 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, return r; } -static int radeon_mode_mmap(struct drm_file *filp, - struct drm_device *dev, - uint32_t handle, bool dumb, - uint64_t *offset_p) +int radeon_mode_dumb_mmap(struct drm_file *filp, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p) { struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp, if (gobj == NULL) { return -ENOENT; } - - /* - * We don't allow dumb mmaps on objects created using another - * interface. - */ - WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach), - "Illegal dumb map of GPU buffer.\n"); - robj = gem_to_radeon_bo(gobj); if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { drm_gem_object_unreference_unlocked(gobj); @@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp, return 0; } -int radeon_mode_dumb_mmap(struct drm_file *filp, - struct drm_device *dev, - uint32_t handle, uint64_t *offset_p) -{ - return radeon_mode_mmap(filp, dev, handle, true, offset_p); -} - int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct drm_radeon_gem_mmap *args = data; - return radeon_mode_mmap(filp, dev, args->handle, false, - &args->addr_ptr); + return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); } int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, @@ -548,7 +531,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, struct radeon_bo_va *bo_va) { struct ttm_validate_buffer tv, *entry; - struct radeon_cs_reloc *vm_bos; + struct radeon_bo_list *vm_bos; struct ww_acquire_ctx ticket; struct list_head list; unsigned domain; @@ -564,7 +547,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) goto error_free; @@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, return -ENOMEM; r = drm_gem_handle_create(file_priv, gobj, &handle); - gobj->dumb = true; /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(gobj); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 7784911d78ef..00fc59762e0d 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) if 
(rdev->flags & RADEON_IS_AGP) return false; + /* + * Older chips have a HW limitation, they can only generate 40 bits + * of address for "64-bit" MSIs which breaks on some platforms, notably + * IBM POWER servers, so we limit them + */ + if (rdev->family < CHIP_BONAIRE) { + dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); + rdev->pdev->no_64bit_msi = 1; + } + /* force MSI on */ if (radeon_msi == 1) return true; diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 065d02068ec3..d3e78b4a6b75 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -28,6 +28,8 @@ #include "cikd.h" #include "cik_reg.h" #include "radeon_kfd.h" +#include "radeon_ucode.h" +#include <linux/firmware.h> #define CIK_PIPE_PER_MEC (4) @@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd); static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); +static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); /* * Register access functions @@ -91,12 +94,14 @@ static const struct kfd2kgd_calls kfd2kgd = { .hqd_load = kgd_hqd_load, .hqd_is_occupies = kgd_hqd_is_occupies, .hqd_destroy = kgd_hqd_destroy, + .get_fw_version = get_fw_version }; static const struct kgd2kfd_calls *kgd2kfd; bool radeon_kfd_init(void) { +#if defined(CONFIG_HSA_AMD_MODULE) bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, const struct kgd2kfd_calls**); @@ -113,6 +118,17 @@ bool radeon_kfd_init(void) } return true; +#elif defined(CONFIG_HSA_AMD) + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { + kgd2kfd = NULL; + + return false; + } + + return true; +#else + return false; +#endif } void radeon_kfd_fini(void) @@ -561,3 +577,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, release_queue(kgd); return 0; } + +static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) +{ + struct radeon_device *rdev = (struct radeon_device *) kgd; + const union radeon_firmware_header *hdr; + + BUG_ON(kgd == NULL || rdev->mec_fw == NULL); + + switch (type) { + case KGD_ENGINE_PFP: + hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data; + break; + + case KGD_ENGINE_ME: + hdr = (const union radeon_firmware_header *) rdev->me_fw->data; + break; + + case KGD_ENGINE_CE: + hdr = (const union radeon_firmware_header *) rdev->ce_fw->data; + break; + + case KGD_ENGINE_MEC1: + hdr = (const union radeon_firmware_header *) rdev->mec_fw->data; + break; + + case KGD_ENGINE_MEC2: + hdr = (const union radeon_firmware_header *) + rdev->mec2_fw->data; + break; + + case KGD_ENGINE_RLC: + hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data; + break; + + case KGD_ENGINE_SDMA: + hdr = (const union radeon_firmware_header *) + rdev->sdma_fw->data; + break; + + default: + return 0; + } + + if (hdr == NULL) + return 0; + + /* Only 12 bit in use*/ + return hdr->common.ucode_version; +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index f4dd26ae33e5..3cf9c1fa6475 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -800,6 +800,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, /* Get associated drm_crtc: */ drmcrtc = &rdev->mode_info.crtcs[crtc]->base; + if (!drmcrtc) + return -EINVAL; /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, diff --git 
a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cafb1ccf2ec3..678b4386540d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); } } + radeon_cursor_reset(crtc); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f3d87cdd5c9d..390db897f322 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -818,6 +818,7 @@ extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y); extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); +extern void radeon_cursor_reset(struct drm_crtc *crtc); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 87b00d902bf7..86fc56434b28 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -233,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev, if (!(rdev->flags & RADEON_IS_PCIE)) bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); +#ifdef CONFIG_X86_32 + /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit + * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 + */ + bo->flags &= ~RADEON_GEM_GTT_WC; +#endif + radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); @@ -502,28 +509,26 @@ int radeon_bo_list_validate(struct radeon_device *rdev, struct ww_acquire_ctx *ticket, struct list_head *head, int ring) { - struct radeon_cs_reloc *lobj; - struct radeon_bo *bo; + struct radeon_bo_list *lobj; + struct list_head duplicates; int r; u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); - r = ttm_eu_reserve_buffers(ticket, head, true); + INIT_LIST_HEAD(&duplicates); + r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); if (unlikely(r != 0)) { return r; } list_for_each_entry(lobj, head, tv.head) { - bo = lobj->robj; + struct radeon_bo *bo = lobj->robj; if (!bo->pin_count) { u32 domain = lobj->prefered_domains; u32 allowed = lobj->allowed_domains; u32 current_domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type); - WARN_ONCE(bo->gem_base.dumb, - "GPU use of dumb buffer is illegal.\n"); - /* Check if this buffer will be moved and don't move it * if we have moved too many buffers for this IB already. 
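/*
 * [Editor's sketch] radeon_bo_list_validate() above rations how much
 * data one command submission may migrate: once the bytes moved for this
 * IB pass a threshold, later buffers are validated in whatever domain
 * they already occupy instead of being moved to the preferred one.
 * Simplified logic; every helper name below is invented:
 */
#include <linux/types.h>

struct bo;

static u32 preferred_domain(struct bo *bo);	/* assumed helpers */
static u32 allowed_domains(struct bo *bo);
static u32 current_domain(struct bo *bo);
static u64 validate_in(struct bo *bo, u32 domain);	/* returns bytes migrated */

static void validate_list(struct bo **bos, int n,
			  u64 threshold, u64 *bytes_moved)
{
	int i;

	for (i = 0; i < n; i++) {
		u32 domain = preferred_domain(bos[i]);

		/* over budget and the BO is somewhere acceptable: leave it */
		if (*bytes_moved > threshold &&
		    (allowed_domains(bos[i]) & current_domain(bos[i])))
			domain = current_domain(bos[i]);

		*bytes_moved += validate_in(bos[i], domain);
	}
}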
* @@ -562,6 +567,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev, lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; } + + list_for_each_entry(lobj, &duplicates, tv.head) { + lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); + lobj->tiling_flags = lobj->robj->tiling_flags; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 9db74a96ef61..ce075cb08cb2 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs, TP_fast_assign( __entry->ring = p->ring; - __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; + __entry->dw = p->chunk_ib->length_dw; __entry->fences = radeon_fence_count_emitted( p->rdev, p->ring); ), diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index cbe7b32d181c..d02aa1d0f588 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: - if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) + if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size && bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 11b662469253..c10b2aec6450 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, unsigned buf_sizes[], bool *has_msg_cmd) { struct radeon_cs_chunk *relocs_chunk; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; unsigned idx, cmd, offset; uint64_t start, end; int r; - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + relocs_chunk = p->chunk_relocs; offset = radeon_get_ib_value(p, data0); idx = radeon_get_ib_value(p, data1); if (idx >= relocs_chunk->length_dw) { @@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, return -EINVAL; } - reloc = p->relocs_ptr[(idx / 4)]; + reloc = &p->relocs[(idx / 4)]; start = reloc->gpu_offset; end = start + radeon_bo_size(reloc->robj); start += offset; @@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) [0x00000003] = 2048, }; - if (p->chunks[p->chunk_ib_idx].length_dw % 16) { + if (p->chunk_ib->length_dw % 16) { DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", - p->chunks[p->chunk_ib_idx].length_dw); + p->chunk_ib->length_dw); return -EINVAL; } - if (p->chunk_relocs_idx == -1) { + if (p->chunk_relocs == NULL) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } @@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("Unknown packet type %d !\n", pkt.type); return -EINVAL; } - } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + } while (p->idx < p->chunk_ib->length_dw); if (!has_msg_cmd) { DRM_ERROR("UVD-IBs need a msg command!\n"); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 9e85757d5599..976fe432f4e2 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size) 
{ struct radeon_cs_chunk *relocs_chunk; - struct radeon_cs_reloc *reloc; + struct radeon_bo_list *reloc; uint64_t start, end, offset; unsigned idx; - relocs_chunk = &p->chunks[p->chunk_relocs_idx]; + relocs_chunk = p->chunk_relocs; offset = radeon_get_ib_value(p, lo); idx = radeon_get_ib_value(p, hi); @@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, return -EINVAL; } - reloc = p->relocs_ptr[(idx / 4)]; + reloc = &p->relocs[(idx / 4)]; start = reloc->gpu_offset; end = start + radeon_bo_size(reloc->robj); start += offset; @@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) uint32_t *size = &tmp; int i, r; - while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { + while (p->idx < p->chunk_ib->length_dw) { uint32_t len = radeon_get_ib_value(p, p->idx); uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 0b10f3a03ce2..cde48c42b30a 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) * Add the page directory to the list of BOs to * validate for command submission (cayman+). */ -struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, +struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_vm *vm, struct list_head *head) { - struct radeon_cs_reloc *list; + struct radeon_bo_list *list; unsigned i, idx; list = drm_malloc_ab(vm->max_pde_used + 2, - sizeof(struct radeon_cs_reloc)); + sizeof(struct radeon_bo_list)); if (!list) return NULL; /* add the vm page table to the list */ - list[0].gobj = NULL; list[0].robj = vm->page_directory; list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[0].tv.bo = &vm->page_directory->tbo; list[0].tv.shared = true; list[0].tiling_flags = 0; - list[0].handle = 0; list_add(&list[0].tv.head, head); for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { if (!vm->page_tables[i].bo) continue; - list[idx].gobj = NULL; list[idx].robj = vm->page_tables[i].bo; list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].tv.bo = &list[idx].robj->tbo; list[idx].tv.shared = true; list[idx].tiling_flags = 0; - list[idx].handle = 0; list_add(&list[idx++].tv.head, head); } @@ -491,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp->vm = vm; tmp->addr = bo_va->addr; tmp->bo = radeon_bo_ref(bo_va->bo); + spin_lock(&vm->status_lock); list_add(&tmp->vm_status, &vm->freed); + spin_unlock(&vm->status_lock); } interval_tree_remove(&bo_va->it, &vm->va); @@ -802,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, * * Global and local mutex must be locked! 
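A recurring change in the radeon hunks above replaces indexed double lookups such as p->chunks[p->chunk_ib_idx] with pointers (p->chunk_ib, p->chunk_relocs) resolved once at parse time, so the "no such chunk" case becomes a NULL test instead of a -1 sentinel. A minimal before/after model, with hypothetical parser types:

#include <stddef.h>

struct chunk { unsigned length_dw; };

struct parser_old {
        struct chunk *chunks;
        int chunk_ib_idx;       /* -1 means "no IB chunk" */
};

struct parser_new {
        struct chunk *chunks;
        struct chunk *chunk_ib; /* NULL means "no IB chunk" */
};

static unsigned ib_len_old(struct parser_old *p)
{
        if (p->chunk_ib_idx == -1)
                return 0;
        return p->chunks[p->chunk_ib_idx].length_dw; /* index on every use */
}

static unsigned ib_len_new(struct parser_new *p)
{
        if (!p->chunk_ib)
                return 0;
        return p->chunk_ib->length_dw; /* pointer resolved once at setup */
}

int main(void)
{
        struct chunk c = { .length_dw = 16 };
        struct parser_old po = { .chunks = &c, .chunk_ib_idx = 0 };
        struct parser_new pn = { .chunks = &c, .chunk_ib = &c };

        return ib_len_old(&po) != ib_len_new(&pn);
}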
*/ -static void radeon_vm_update_ptes(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_ib *ib, - uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags) +static int radeon_vm_update_ptes(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_ib *ib, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) { uint64_t mask = RADEON_VM_PTE_COUNT - 1; uint64_t last_pte = ~0, last_dst = ~0; @@ -819,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_bo *pt = vm->page_tables[pt_idx].bo; unsigned nptes; uint64_t pte; + int r; radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true); + r = reservation_object_reserve_shared(pt->tbo.resv); + if (r) + return r; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -854,6 +856,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, last_pte + 8 * count, last_dst, flags); } + + return 0; } /** @@ -878,7 +882,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm, end >>= radeon_vm_block_size; for (i = start; i <= end; ++i) - radeon_bo_fence(vm->page_tables[i].bo, fence, false); + radeon_bo_fence(vm->page_tables[i].bo, fence, true); } /** @@ -911,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, return -EINVAL; } + spin_lock(&vm->status_lock); list_del_init(&bo_va->vm_status); + spin_unlock(&vm->status_lock); bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; @@ -987,9 +993,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev, radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); } - radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, - bo_va->it.last + 1, addr, - radeon_vm_page_flags(bo_va->flags)); + r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, + bo_va->it.last + 1, addr, + radeon_vm_page_flags(bo_va->flags)); + if (r) { + radeon_ib_free(rdev, &ib); + return r; + } radeon_asic_vm_pad_ib(rdev, &ib); WARN_ON(ib.length_dw > ndw); @@ -1022,17 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev, int radeon_vm_clear_freed(struct radeon_device *rdev, struct radeon_vm *vm) { - struct radeon_bo_va *bo_va, *tmp; + struct radeon_bo_va *bo_va; int r; - list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { + spin_lock(&vm->status_lock); + while (!list_empty(&vm->freed)) { + bo_va = list_first_entry(&vm->freed, + struct radeon_bo_va, vm_status); + spin_unlock(&vm->status_lock); + r = radeon_vm_bo_update(rdev, bo_va, NULL); radeon_bo_unref(&bo_va->bo); radeon_fence_unref(&bo_va->last_pt_update); kfree(bo_va); if (r) return r; + + spin_lock(&vm->status_lock); } + spin_unlock(&vm->status_lock); return 0; } @@ -1051,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev, int radeon_vm_clear_invalids(struct radeon_device *rdev, struct radeon_vm *vm) { - struct radeon_bo_va *bo_va, *tmp; + struct radeon_bo_va *bo_va; int r; - list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) { + spin_lock(&vm->status_lock); + while (!list_empty(&vm->invalidated)) { + bo_va = list_first_entry(&vm->invalidated, + struct radeon_bo_va, vm_status); + spin_unlock(&vm->status_lock); + r = radeon_vm_bo_update(rdev, bo_va, NULL); if (r) return r; + + spin_lock(&vm->status_lock); } + spin_unlock(&vm->status_lock); + return 0; } @@ -1081,6 +1108,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, mutex_lock(&vm->mutex); interval_tree_remove(&bo_va->it, &vm->va); + spin_lock(&vm->status_lock); list_del(&bo_va->vm_status); if (bo_va->addr) { @@ -1090,6 +1118,7 @@ void radeon_vm_bo_rmv(struct 
radeon_device *rdev, radeon_fence_unref(&bo_va->last_pt_update); kfree(bo_va); } + spin_unlock(&vm->status_lock); mutex_unlock(&vm->mutex); } @@ -1110,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, list_for_each_entry(bo_va, &bo->va, bo_list) { if (bo_va->addr) { - mutex_lock(&bo_va->vm->mutex); + spin_lock(&bo_va->vm->status_lock); list_del(&bo_va->vm_status); list_add(&bo_va->vm_status, &bo_va->vm->invalidated); - mutex_unlock(&bo_va->vm->mutex); + spin_unlock(&bo_va->vm->status_lock); } } } @@ -1141,6 +1170,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) } mutex_init(&vm->mutex); vm->va = RB_ROOT; + spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->freed); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5f6db4629aaa..9acb1c3c005b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev) u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; /* FIXME: implement full support */ + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3462b64369bf..0a2d36e81108 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 8a477bf1fdb3..c55d653aaf5f 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode0 = NULL; struct drm_display_mode *mode1 = NULL; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 14896ce76324..60df444bd075 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index cf4c420b5572..32e354b8b0ab 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -5893,7 +5893,7 @@ static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) tmp |= TMIN(0); WREG32(CG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; tmp |= FDO_PWM_MODE(mode); WREG32(CG_FDO_CTRL2, tmp); } @@ -6098,7 +6098,7 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, tmp |= TARGET_PERIOD(tach_period); WREG32(CG_TACH_CTRL, tmp); - si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); + si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); return 0; } @@ -6114,7 +6114,7 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev) tmp 
|= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); WREG32(CG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & TMIN_MASK; + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; tmp |= TMIN(si_pi->t_min); WREG32(CG_FDO_CTRL2, tmp); si_pi->fan_ctrl_is_in_default_mode = true; diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index c549c16a4fe4..4069be89e585 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -208,18 +208,18 @@ #define CG_FDO_CTRL0 0x754 #define FDO_STATIC_DUTY(x) ((x) << 0) -#define FDO_STATIC_DUTY_MASK 0x0000000F +#define FDO_STATIC_DUTY_MASK 0x000000FF #define FDO_STATIC_DUTY_SHIFT 0 #define CG_FDO_CTRL1 0x758 #define FMAX_DUTY100(x) ((x) << 0) -#define FMAX_DUTY100_MASK 0x0000000F +#define FMAX_DUTY100_MASK 0x000000FF #define FMAX_DUTY100_SHIFT 0 #define CG_FDO_CTRL2 0x75C #define TMIN(x) ((x) << 0) -#define TMIN_MASK 0x0000000F +#define TMIN_MASK 0x000000FF #define TMIN_SHIFT 0 #define FDO_PWM_MODE(x) ((x) << 11) -#define FDO_PWM_MODE_MASK (3 << 11) +#define FDO_PWM_MODE_MASK (7 << 11) #define FDO_PWM_MODE_SHIFT 11 #define TACH_PWM_RESP_RATE(x) ((x) << 25) #define TACH_PWM_RESP_RATE_MASK (0x7f << 25) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 967ae8f20233..7bfa09cf18d5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -328,7 +328,6 @@ static struct platform_driver rcar_du_platform_driver = { .probe = rcar_du_probe, .remove = rcar_du_remove, .driver = { - .owner = THIS_MODULE, .name = "rcar-du", .pm = &rcar_du_pm_ops, .of_match_table = rcar_du_of_table, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index 8abaaf258f45..4d7d4dd46d26 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -25,7 +25,8 @@ static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = connector->encoder; + struct rcar_du_connector *con = to_rcar_connector(connector); + struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->get_modes == NULL) @@ -37,7 +38,8 @@ static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector) static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = connector->encoder; + struct rcar_du_connector *con = to_rcar_connector(connector); + struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->mode_valid == NULL) @@ -61,7 +63,8 @@ static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector) static enum drm_connector_status rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) { - struct drm_encoder *encoder = connector->encoder; + struct rcar_du_connector *con = to_rcar_connector(connector); + struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->detect == NULL) diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig new file mode 100644 index 000000000000..ca9f085efa92 --- /dev/null +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -0,0 +1,17 @@ +config DRM_ROCKCHIP + tristate "DRM Support for Rockchip" + depends on DRM && ROCKCHIP_IOMMU + select 
DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + select DRM_PANEL + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE + select VIDEOMODE_HELPERS + help + Choose this option if you have a Rockchip soc chipset. + This driver provides kernel mode setting and buffer + management to userspace. This driver does not provide + 2D or 3D acceleration; acceleration is performed by other + IP found on the SoC. diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile new file mode 100644 index 000000000000..2cb0672f57ed --- /dev/null +++ b/drivers/gpu/drm/rockchip/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \ + rockchip_drm_gem.o + +obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c new file mode 100644 index 000000000000..a798c7c71f91 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -0,0 +1,551 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * based on exynos_drm_drv.c + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <asm/dma-iommu.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/of_graph.h> +#include <linux/component.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_fb.h" +#include "rockchip_drm_fbdev.h" +#include "rockchip_drm_gem.h" + +#define DRIVER_NAME "rockchip" +#define DRIVER_DESC "RockChip Soc DRM" +#define DRIVER_DATE "20140818" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +/* + * Attach a (component) device to the shared drm dma mapping from master drm + * device. This is used by the VOPs to map GEM buffers to a common DMA + * mapping. 
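The si_dpm.c fan-control fixes a little above correct a classic read-modify-write slip: clearing a register field requires ANDing with the complement of the field mask (& ~TMIN_MASK), not with the mask itself, which would instead discard every bit outside the field. The sid.h changes widen the masks to match the actual hardware field widths. The corrected idiom in isolation, with made-up field names:

#include <stdint.h>
#include <assert.h>

#define FIELD_SHIFT 11
#define FIELD_MASK  (0x7u << FIELD_SHIFT)   /* three-bit field */
#define FIELD(x)    (((uint32_t)(x) << FIELD_SHIFT) & FIELD_MASK)

/* Replace one field, preserving all other bits in the register. */
static uint32_t set_field(uint32_t reg, uint32_t val)
{
        reg &= ~FIELD_MASK; /* clear only the target field */
        reg |= FIELD(val);  /* then OR in the new value    */
        return reg;
}

int main(void)
{
        uint32_t reg = 0xffffffffu;

        reg = set_field(reg, 5);
        assert((reg & FIELD_MASK) >> FIELD_SHIFT == 5);
        assert((reg & ~FIELD_MASK) == (0xffffffffu & ~FIELD_MASK));
        return 0;
}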
+ */ +int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, + struct device *dev) +{ + struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; + int ret; + + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + + return arm_iommu_attach_device(dev, mapping); +} +EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device); + +void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, + struct device *dev) +{ + arm_iommu_detach_device(dev); +} +EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device); + +int rockchip_register_crtc_funcs(struct drm_device *dev, + const struct rockchip_crtc_funcs *crtc_funcs, + int pipe) +{ + struct rockchip_drm_private *priv = dev->dev_private; + + if (pipe > ROCKCHIP_MAX_CRTC) + return -EINVAL; + + priv->crtc_funcs[pipe] = crtc_funcs; + + return 0; +} +EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs); + +void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe) +{ + struct rockchip_drm_private *priv = dev->dev_private; + + if (pipe > ROCKCHIP_MAX_CRTC) + return; + + priv->crtc_funcs[pipe] = NULL; +} +EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs); + +static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm, + int pipe) +{ + struct drm_crtc *crtc; + int i = 0; + + list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) + if (i++ == pipe) + return crtc; + + return NULL; +} + +static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) +{ + struct rockchip_drm_private *priv = dev->dev_private; + struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); + + if (crtc && priv->crtc_funcs[pipe] && + priv->crtc_funcs[pipe]->enable_vblank) + return priv->crtc_funcs[pipe]->enable_vblank(crtc); + + return 0; +} + +static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) +{ + struct rockchip_drm_private *priv = dev->dev_private; + struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe); + + if (crtc && priv->crtc_funcs[pipe] && + priv->crtc_funcs[pipe]->enable_vblank) + priv->crtc_funcs[pipe]->disable_vblank(crtc); +} + +static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) +{ + struct rockchip_drm_private *private; + struct dma_iommu_mapping *mapping; + struct device *dev = drm_dev->dev; + int ret; + + private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + drm_dev->dev_private = private; + + drm_mode_config_init(drm_dev); + + rockchip_drm_mode_config_init(drm_dev); + + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), + GFP_KERNEL); + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_config_cleanup; + } + + /* TODO(djkurtz): fetch the mapping start/size from somewhere */ + mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000, + SZ_2G); + if (IS_ERR(mapping)) { + ret = PTR_ERR(mapping); + goto err_config_cleanup; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + goto err_release_mapping; + + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + + ret = arm_iommu_attach_device(dev, mapping); + if (ret) + goto err_release_mapping; + + /* Try to bind all sub drivers. */ + ret = component_bind_all(dev, drm_dev); + if (ret) + goto err_detach_device; + + /* init kms poll for handling hpd */ + drm_kms_helper_poll_init(drm_dev); + + /* + * enable drm irq mode. + * - with irq_enabled = true, we can use the vblank feature. 
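The rockchip driver above dispatches vblank on/off through a per-pipe table of function pointers that each VOP registers at bind time. Two details in those hunks look worth flagging: the bounds check uses pipe > ROCKCHIP_MAX_CRTC, which still admits pipe == ROCKCHIP_MAX_CRTC and would index one past a two-entry array (likely meant to be >=), and rockchip_drm_crtc_disable_vblank() tests the enable_vblank hook before calling disable_vblank. A sketch of the registration pattern with the stricter checks, using hypothetical names:

#include <stddef.h>
#include <errno.h>

#define MAX_CRTC 2

struct crtc_funcs {
        int  (*enable_vblank)(int pipe);
        void (*disable_vblank)(int pipe);
};

static const struct crtc_funcs *crtc_funcs[MAX_CRTC];

static int register_crtc_funcs(int pipe, const struct crtc_funcs *funcs)
{
        if (pipe < 0 || pipe >= MAX_CRTC) /* >=, so MAX_CRTC is rejected */
                return -EINVAL;
        crtc_funcs[pipe] = funcs;
        return 0;
}

static void disable_vblank(int pipe)
{
        if (pipe < 0 || pipe >= MAX_CRTC)
                return;
        /* test the same hook we are about to call */
        if (crtc_funcs[pipe] && crtc_funcs[pipe]->disable_vblank)
                crtc_funcs[pipe]->disable_vblank(pipe);
}

int main(void)
{
        static const struct crtc_funcs funcs = { 0 };

        register_crtc_funcs(0, &funcs);
        disable_vblank(0);        /* safe: NULL hook is skipped    */
        disable_vblank(MAX_CRTC); /* safe: rejected by bounds check */
        return 0;
}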
+ */ + drm_dev->irq_enabled = true; + + ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC); + if (ret) + goto err_kms_helper_poll_fini; + + /* + * with vblank_disable_allowed = true, vblank interrupt will be disabled + * by drm timer once a current process gives up ownership of + * vblank event.(after drm_vblank_put function is called) + */ + drm_dev->vblank_disable_allowed = true; + + ret = rockchip_drm_fbdev_init(drm_dev); + if (ret) + goto err_vblank_cleanup; + + return 0; +err_vblank_cleanup: + drm_vblank_cleanup(drm_dev); +err_kms_helper_poll_fini: + drm_kms_helper_poll_fini(drm_dev); + component_unbind_all(dev, drm_dev); +err_detach_device: + arm_iommu_detach_device(dev); +err_release_mapping: + arm_iommu_release_mapping(dev->archdata.mapping); +err_config_cleanup: + drm_mode_config_cleanup(drm_dev); + drm_dev->dev_private = NULL; + return ret; +} + +static int rockchip_drm_unload(struct drm_device *drm_dev) +{ + struct device *dev = drm_dev->dev; + + rockchip_drm_fbdev_fini(drm_dev); + drm_vblank_cleanup(drm_dev); + drm_kms_helper_poll_fini(drm_dev); + component_unbind_all(dev, drm_dev); + arm_iommu_detach_device(dev); + arm_iommu_release_mapping(dev->archdata.mapping); + drm_mode_config_cleanup(drm_dev); + drm_dev->dev_private = NULL; + + return 0; +} + +void rockchip_drm_lastclose(struct drm_device *dev) +{ + struct rockchip_drm_private *priv = dev->dev_private; + + drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper); +} + +static const struct file_operations rockchip_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .mmap = rockchip_gem_mmap, + .poll = drm_poll, + .read = drm_read, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .release = drm_release, +}; + +const struct vm_operations_struct rockchip_drm_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct drm_driver rockchip_drm_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, + .load = rockchip_drm_load, + .unload = rockchip_drm_unload, + .lastclose = rockchip_drm_lastclose, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = rockchip_drm_crtc_enable_vblank, + .disable_vblank = rockchip_drm_crtc_disable_vblank, + .gem_vm_ops = &rockchip_drm_vm_ops, + .gem_free_object = rockchip_gem_free_object, + .dumb_create = rockchip_gem_dumb_create, + .dumb_map_offset = rockchip_gem_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table, + .gem_prime_vmap = rockchip_gem_prime_vmap, + .gem_prime_vunmap = rockchip_gem_prime_vunmap, + .gem_prime_mmap = rockchip_gem_mmap_buf, + .fops = &rockchip_drm_driver_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +#ifdef CONFIG_PM_SLEEP +static int rockchip_drm_sys_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct drm_connector *connector; + + if (!drm) + return 0; + + drm_modeset_lock_all(drm); + list_for_each_entry(connector, &drm->mode_config.connector_list, head) { + int old_dpms = connector->dpms; + + if (connector->funcs->dpms) + connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + + /* Set the old mode back to the connector for resume */ + connector->dpms = old_dpms; + } + 
drm_modeset_unlock_all(drm); + + return 0; +} + +static int rockchip_drm_sys_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct drm_connector *connector; + enum drm_connector_status status; + bool changed = false; + + if (!drm) + return 0; + + drm_modeset_lock_all(drm); + list_for_each_entry(connector, &drm->mode_config.connector_list, head) { + int desired_mode = connector->dpms; + + /* + * at suspend time, we save dpms to connector->dpms, + * restore the old_dpms, and at current time, the connector + * dpms status must be DRM_MODE_DPMS_OFF. + */ + connector->dpms = DRM_MODE_DPMS_OFF; + + /* + * If the connector has been disconnected during suspend, + * disconnect it from the encoder and leave it off. We'll notify + * userspace at the end. + */ + if (desired_mode == DRM_MODE_DPMS_ON) { + status = connector->funcs->detect(connector, true); + if (status == connector_status_disconnected) { + connector->encoder = NULL; + connector->status = status; + changed = true; + continue; + } + } + if (connector->funcs->dpms) + connector->funcs->dpms(connector, desired_mode); + } + drm_modeset_unlock_all(drm); + + drm_helper_resume_force_mode(drm); + + if (changed) + drm_kms_helper_hotplug_event(drm); + + return 0; +} +#endif + +static const struct dev_pm_ops rockchip_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend, + rockchip_drm_sys_resume) +}; + +/* + * @node: device tree node containing encoder input ports + * @encoder: drm_encoder + */ +int rockchip_drm_encoder_get_mux_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct device_node *ep = NULL; + struct drm_crtc *crtc = encoder->crtc; + struct of_endpoint endpoint; + struct device_node *port; + int ret; + + if (!node || !crtc) + return -EINVAL; + + do { + ep = of_graph_get_next_endpoint(node, ep); + if (!ep) + break; + + port = of_graph_get_remote_port(ep); + of_node_put(port); + if (port == crtc->port) { + ret = of_graph_parse_endpoint(ep, &endpoint); + return ret ?: endpoint.id; + } + } while (ep); + + return -EINVAL; +} + +static int compare_of(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static void rockchip_add_endpoints(struct device *dev, + struct component_match **match, + struct device_node *port) +{ + struct device_node *ep, *remote; + + for_each_child_of_node(port, ep) { + remote = of_graph_get_remote_port_parent(ep); + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); + continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; + } + + component_match_add(dev, match, compare_of, remote); + of_node_put(remote); + } +} + +static int rockchip_drm_bind(struct device *dev) +{ + struct drm_device *drm; + int ret; + + drm = drm_dev_alloc(&rockchip_drm_driver, dev); + if (!drm) + return -ENOMEM; + + ret = drm_dev_set_unique(drm, "%s", dev_name(dev)); + if (ret) + goto err_free; + + ret = drm_dev_register(drm, 0); + if (ret) + goto err_free; + + dev_set_drvdata(dev, drm); + + return 0; + +err_free: + drm_dev_unref(drm); + return ret; +} + +static void rockchip_drm_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_dev_unregister(drm); + drm_dev_unref(drm); + dev_set_drvdata(dev, NULL); +} + +static const struct component_master_ops rockchip_drm_ops = { + .bind = rockchip_drm_bind, + .unbind = rockchip_drm_unbind, +}; + +static int 
rockchip_drm_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct component_match *match = NULL; + struct device_node *np = dev->of_node; + struct device_node *port; + int i; + + if (!np) + return -ENODEV; + /* + * Bind the crtc ports first, so that + * drm_of_find_possible_crtcs called from encoder .bind callbacks + * works as expected. + */ + for (i = 0;; i++) { + port = of_parse_phandle(np, "ports", i); + if (!port) + break; + + if (!of_device_is_available(port->parent)) { + of_node_put(port); + continue; + } + + component_match_add(dev, &match, compare_of, port->parent); + of_node_put(port); + } + + if (i == 0) { + dev_err(dev, "missing 'ports' property\n"); + return -ENODEV; + } + + if (!match) { + dev_err(dev, "No available vop found for display-subsystem.\n"); + return -ENODEV; + } + /* + * For each bound crtc, bind the encoders attached to its + * remote endpoint. + */ + for (i = 0;; i++) { + port = of_parse_phandle(np, "ports", i); + if (!port) + break; + + if (!of_device_is_available(port->parent)) { + of_node_put(port); + continue; + } + + rockchip_add_endpoints(dev, &match, port); + of_node_put(port); + } + + return component_master_add_with_match(dev, &rockchip_drm_ops, match); +} + +static int rockchip_drm_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &rockchip_drm_ops); + + return 0; +} + +static const struct of_device_id rockchip_drm_dt_ids[] = { + { .compatible = "rockchip,display-subsystem", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); + +static struct platform_driver rockchip_drm_platform_driver = { + .probe = rockchip_drm_platform_probe, + .remove = rockchip_drm_platform_remove, + .driver = { + .owner = THIS_MODULE, + .name = "rockchip-drm", + .of_match_table = rockchip_drm_dt_ids, + .pm = &rockchip_drm_pm_ops, + }, +}; + +module_platform_driver(rockchip_drm_platform_driver); + +MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>"); +MODULE_DESCRIPTION("ROCKCHIP DRM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h new file mode 100644 index 000000000000..dc4e5f03ac79 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * based on exynos_drm_drv.h + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ROCKCHIP_DRM_DRV_H +#define _ROCKCHIP_DRM_DRV_H + +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> + +#include <linux/module.h> +#include <linux/component.h> + +#define ROCKCHIP_MAX_FB_BUFFER 3 +#define ROCKCHIP_MAX_CONNECTOR 2 +#define ROCKCHIP_MAX_CRTC 2 + +struct drm_device; +struct drm_connector; + +/* + * Rockchip drm private crtc funcs. + * @enable_vblank: enable crtc vblank irq. + * @disable_vblank: disable crtc vblank irq. + */ +struct rockchip_crtc_funcs { + int (*enable_vblank)(struct drm_crtc *crtc); + void (*disable_vblank)(struct drm_crtc *crtc); +}; + +/* + * Rockchip drm private structure. 
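The suspend/resume hooks above rely on a small trick: suspend forces every connector off through its dpms callback but writes the pre-suspend value back into connector->dpms, so the field doubles as "desired state" storage; resume then resets it to DPMS_OFF (which is the true hardware state), re-detects, and only turns panels back on that were on and are still connected. A standalone model of that save-in-place pattern:

#include <stdio.h>

enum dpms { DPMS_ON, DPMS_OFF };

struct connector {
        enum dpms dpms; /* doubles as "desired state" over suspend */
        int connected;
};

static void suspend_one(struct connector *c)
{
        enum dpms old = c->dpms;

        c->dpms = DPMS_OFF; /* what the dpms(OFF) hook would record */
        /* ... program the panel/encoder off here ... */
        c->dpms = old;      /* but keep the desired mode for resume */
}

static void resume_one(struct connector *c)
{
        enum dpms desired = c->dpms;

        c->dpms = DPMS_OFF; /* the hardware really is off right now */
        if (desired == DPMS_ON && c->connected) {
                /* ... program the panel/encoder on here ... */
                c->dpms = DPMS_ON;
        }
}

int main(void)
{
        struct connector c = { .dpms = DPMS_ON, .connected = 1 };

        suspend_one(&c);
        resume_one(&c);
        printf("resumed dpms=%d\n", c.dpms);
        return 0;
}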
+ * + * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc. + * @num_pipe: number of pipes for this device. + */ +struct rockchip_drm_private { + struct drm_fb_helper fbdev_helper; + struct drm_gem_object *fbdev_bo; + const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; +}; + +int rockchip_register_crtc_funcs(struct drm_device *dev, + const struct rockchip_crtc_funcs *crtc_funcs, + int pipe); +void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe); +int rockchip_drm_encoder_get_mux_id(struct device_node *node, + struct drm_encoder *encoder); +int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type, + int out_mode); +int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, + struct device *dev); +void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, + struct device *dev); + +#endif /* _ROCKCHIP_DRM_DRV_H_ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c new file mode 100644 index 000000000000..77d52893d40f --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <drm/drm.h> +#include <drm/drmP.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_gem.h" + +#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) + +struct rockchip_drm_fb { + struct drm_framebuffer fb; + struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER]; +}; + +struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane) +{ + struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb); + + if (plane >= ROCKCHIP_MAX_FB_BUFFER) + return NULL; + + return rk_fb->obj[plane]; +} +EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj); + +static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) +{ + struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); + struct drm_gem_object *obj; + int i; + + for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) { + obj = rockchip_fb->obj[i]; + if (obj) + drm_gem_object_unreference_unlocked(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(rockchip_fb); +} + +static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); + + return drm_gem_handle_create(file_priv, + rockchip_fb->obj[0], handle); +} + +static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { + .destroy = rockchip_drm_fb_destroy, + .create_handle = rockchip_drm_fb_create_handle, +}; + +static struct rockchip_drm_fb * +rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **obj, unsigned int num_planes) +{ + struct rockchip_drm_fb *rockchip_fb; + int ret; + int i; + + rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL); + if (!rockchip_fb) + return ERR_PTR(-ENOMEM); + + 
drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd); + + for (i = 0; i < num_planes; i++) + rockchip_fb->obj[i] = obj[i]; + + ret = drm_framebuffer_init(dev, &rockchip_fb->fb, + &rockchip_drm_fb_funcs); + if (ret) { + dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", + ret); + kfree(rockchip_fb); + return ERR_PTR(ret); + } + + return rockchip_fb; +} + +static struct drm_framebuffer * +rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct rockchip_drm_fb *rockchip_fb; + struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER]; + struct drm_gem_object *obj; + unsigned int hsub; + unsigned int vsub; + int num_planes; + int ret; + int i; + + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); + num_planes = min(drm_format_num_planes(mode_cmd->pixel_format), + ROCKCHIP_MAX_FB_BUFFER); + + for (i = 0; i < num_planes; i++) { + unsigned int width = mode_cmd->width / (i ? hsub : 1); + unsigned int height = mode_cmd->height / (i ? vsub : 1); + unsigned int min_size; + + obj = drm_gem_object_lookup(dev, file_priv, + mode_cmd->handles[i]); + if (!obj) { + dev_err(dev->dev, "Failed to lookup GEM object\n"); + ret = -ENXIO; + goto err_gem_object_unreference; + } + + min_size = (height - 1) * mode_cmd->pitches[i] + + mode_cmd->offsets[i] + + width * drm_format_plane_cpp(mode_cmd->pixel_format, i); + + if (obj->size < min_size) { + drm_gem_object_unreference_unlocked(obj); + ret = -EINVAL; + goto err_gem_object_unreference; + } + objs[i] = obj; + } + + rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i); + if (IS_ERR(rockchip_fb)) { + ret = PTR_ERR(rockchip_fb); + goto err_gem_object_unreference; + } + + return &rockchip_fb->fb; + +err_gem_object_unreference: + for (i--; i >= 0; i--) + drm_gem_object_unreference_unlocked(objs[i]); + return ERR_PTR(ret); +} + +static void rockchip_drm_output_poll_changed(struct drm_device *dev) +{ + struct rockchip_drm_private *private = dev->dev_private; + struct drm_fb_helper *fb_helper = &private->fbdev_helper; + + drm_fb_helper_hotplug_event(fb_helper); +} + +static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { + .fb_create = rockchip_user_fb_create, + .output_poll_changed = rockchip_drm_output_poll_changed, +}; + +struct drm_framebuffer * +rockchip_drm_framebuffer_init(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + struct rockchip_drm_fb *rockchip_fb; + + rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1); + if (IS_ERR(rockchip_fb)) + return NULL; + + return &rockchip_fb->fb; +} + +void rockchip_drm_mode_config_init(struct drm_device *dev) +{ + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + /* + * set max width and height as default value(4096x4096). + * this value would be used to check framebuffer size limitation + * at drm_mode_addfb(). 
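rockchip_user_fb_create() above rejects undersized GEM objects per plane: plane 0 is taken at full resolution, the others are divided by the format's horizontal/vertical chroma subsampling factors, and each plane must fit (height - 1) * pitch + offset + width * cpp bytes. The same bound in isolation, with the NV12 factors (hsub = vsub = 2, 2-byte interleaved CbCr) filled in by hand as an example:

#include <stdio.h>
#include <stdint.h>

/* Minimum bytes a plane needs inside its backing object. */
static uint64_t plane_min_size(unsigned width, unsigned height,
                               unsigned pitch, unsigned offset, unsigned cpp)
{
        return (uint64_t)(height - 1) * pitch + offset + (uint64_t)width * cpp;
}

int main(void)
{
        unsigned w = 1920, h = 1080;

        /* NV12: plane 0 is full-res, 1 byte per pixel; plane 1 is 2x2
         * subsampled with 2 bytes per sample, so its pitch equals w. */
        uint64_t y  = plane_min_size(w, h, w, 0, 1);
        uint64_t uv = plane_min_size(w / 2, h / 2, w, 0, 2);

        printf("Y plane needs %llu bytes, CbCr plane %llu\n",
               (unsigned long long)y, (unsigned long long)uv);
        return 0;
}

Using (height - 1) * pitch plus one final row, rather than height * pitch, deliberately tolerates a last scanline that is narrower than the pitch.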
+ */ + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; + + dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; +} diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h new file mode 100644 index 000000000000..09574d48226f --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ROCKCHIP_DRM_FB_H +#define _ROCKCHIP_DRM_FB_H + +struct drm_framebuffer * +rockchip_drm_framebuffer_init(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); +void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb); + +void rockchip_drm_mode_config_init(struct drm_device *dev); + +struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane); +#endif /* _ROCKCHIP_DRM_FB_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c new file mode 100644 index 000000000000..a5d889a8716b --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -0,0 +1,210 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <drm/drm.h> +#include <drm/drmP.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_gem.h" +#include "rockchip_drm_fb.h" + +#define PREFERRED_BPP 32 +#define to_drm_private(x) \ + container_of(x, struct rockchip_drm_private, fbdev_helper) + +static int rockchip_fbdev_mmap(struct fb_info *info, + struct vm_area_struct *vma) +{ + struct drm_fb_helper *helper = info->par; + struct rockchip_drm_private *private = to_drm_private(helper); + + return rockchip_gem_mmap_buf(private->fbdev_bo, vma); +} + +static struct fb_ops rockchip_drm_fbdev_ops = { + .owner = THIS_MODULE, + .fb_mmap = rockchip_fbdev_mmap, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_blank = drm_fb_helper_blank, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct rockchip_drm_private *private = to_drm_private(helper); + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_device *dev = helper->dev; + struct rockchip_gem_object *rk_obj; + struct drm_framebuffer *fb; + unsigned int bytes_per_pixel; + unsigned long offset; + struct fb_info *fbi; + size_t size; + int ret; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + + rk_obj = rockchip_gem_create_object(dev, size); + if (IS_ERR(rk_obj)) + return -ENOMEM; + + private->fbdev_bo = &rk_obj->base; + + fbi = framebuffer_alloc(0, dev->dev); + if (!fbi) { + dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); + ret = -ENOMEM; + goto err_rockchip_gem_free_object; + } + + helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, + private->fbdev_bo); + if (IS_ERR(helper->fb)) { + dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); + ret = PTR_ERR(helper->fb); + goto err_framebuffer_release; + } + + helper->fbdev = fbi; + + fbi->par = helper; + fbi->flags = FBINFO_FLAG_DEFAULT; + fbi->fbops = &rockchip_drm_fbdev_ops; + + ret = fb_alloc_cmap(&fbi->cmap, 256, 0); + if (ret) { + dev_err(dev->dev, "Failed to allocate color map.\n"); + goto err_drm_framebuffer_unref; + } + + fb = helper->fb; + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); + + offset = fbi->var.xoffset * bytes_per_pixel; + offset += fbi->var.yoffset * fb->pitches[0]; + + dev->mode_config.fb_base = 0; + fbi->screen_base = rk_obj->kvaddr + offset; + fbi->screen_size = rk_obj->base.size; + fbi->fix.smem_len = rk_obj->base.size; + + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", + fb->width, fb->height, fb->depth, rk_obj->kvaddr, + offset, size); + return 0; + +err_drm_framebuffer_unref: + drm_framebuffer_unreference(helper->fb); +err_framebuffer_release: + framebuffer_release(fbi); +err_rockchip_gem_free_object: + rockchip_gem_free_object(&rk_obj->base); + return ret; +} + +static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = { + .fb_probe = rockchip_drm_fbdev_create, +}; + +int rockchip_drm_fbdev_init(struct drm_device 
*dev) +{ + struct rockchip_drm_private *private = dev->dev_private; + struct drm_fb_helper *helper; + unsigned int num_crtc; + int ret; + + if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + return -EINVAL; + + num_crtc = dev->mode_config.num_crtc; + + helper = &private->fbdev_helper; + + drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR); + if (ret < 0) { + dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n", + ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret < 0) { + dev_err(dev->dev, "Failed to add connectors - %d.\n", ret); + goto err_drm_fb_helper_fini; + } + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); + if (ret < 0) { + dev_err(dev->dev, "Failed to set initial hw config - %d.\n", + ret); + goto err_drm_fb_helper_fini; + } + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(helper); + return ret; +} + +void rockchip_drm_fbdev_fini(struct drm_device *dev) +{ + struct rockchip_drm_private *private = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &private->fbdev_helper; + + if (helper->fbdev) { + struct fb_info *info; + int ret; + + info = helper->fbdev; + ret = unregister_framebuffer(info); + if (ret < 0) + DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n", + ret); + + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + + framebuffer_release(info); + } + + if (helper->fb) + drm_framebuffer_unreference(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h new file mode 100644 index 000000000000..50432e9b5b37 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ROCKCHIP_DRM_FBDEV_H +#define _ROCKCHIP_DRM_FBDEV_H + +int rockchip_drm_fbdev_init(struct drm_device *dev); +void rockchip_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _ROCKCHIP_DRM_FBDEV_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c new file mode 100644 index 000000000000..bc98a227dc76 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -0,0 +1,294 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
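rockchip_drm_fbdev_create() and rockchip_drm_fbdev_init() above both follow the kernel's goto-unwind convention: each acquisition adds one label, and a failure jumps to the label that releases everything acquired so far, in reverse order. Reduced to its shape, with plain malloc/free standing in for the GEM, fb_info, and framebuffer allocations:

#include <stdlib.h>
#include <errno.h>

struct ctx { void *bo, *info, *fb; };

static int setup(struct ctx *c)
{
        int ret;

        c->bo = malloc(64);   /* stands in for the GEM allocation   */
        if (!c->bo)
                return -ENOMEM;

        c->info = malloc(64); /* stands in for framebuffer_alloc()  */
        if (!c->info) {
                ret = -ENOMEM;
                goto err_free_bo;
        }

        c->fb = malloc(64);   /* stands in for the DRM framebuffer  */
        if (!c->fb) {
                ret = -ENOMEM;
                goto err_free_info;
        }

        return 0;

err_free_info: /* unwind in reverse acquisition order */
        free(c->info);
err_free_bo:
        free(c->bo);
        return ret;
}

int main(void)
{
        struct ctx c = { 0 };

        return setup(&c) ? 1 : 0;
}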
+ */ + +#include <drm/drm.h> +#include <drm/drmP.h> +#include <drm/drm_gem.h> +#include <drm/drm_vma_manager.h> + +#include <linux/dma-attrs.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_gem.h" + +static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj) +{ + struct drm_gem_object *obj = &rk_obj->base; + struct drm_device *drm = obj->dev; + + init_dma_attrs(&rk_obj->dma_attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); + + /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */ + rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, + &rk_obj->dma_addr, GFP_KERNEL, + &rk_obj->dma_attrs); + if (IS_ERR(rk_obj->kvaddr)) { + int ret = PTR_ERR(rk_obj->kvaddr); + + DRM_ERROR("failed to allocate %#x byte dma buffer, %d", + obj->size, ret); + return ret; + } + + return 0; +} + +static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) +{ + struct drm_gem_object *obj = &rk_obj->base; + struct drm_device *drm = obj->dev; + + dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, + &rk_obj->dma_attrs); +} + +int rockchip_gem_mmap_buf(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + struct drm_device *drm = obj->dev; + unsigned long vm_size; + + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + vm_size = vma->vm_end - vma->vm_start; + + if (vm_size > obj->size) + return -EINVAL; + + return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + obj->size, &rk_obj->dma_attrs); +} + +/* drm driver mmap file operations */ +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_gem_object *obj; + struct drm_vma_offset_node *node; + int ret; + + if (drm_device_is_unplugged(dev)) + return -ENODEV; + + mutex_lock(&dev->struct_mutex); + + node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (!node) { + mutex_unlock(&dev->struct_mutex); + DRM_ERROR("failed to find vma node.\n"); + return -EINVAL; + } else if (!drm_vma_node_is_allowed(node, filp)) { + mutex_unlock(&dev->struct_mutex); + return -EACCES; + } + + obj = container_of(node, struct drm_gem_object, vma_node); + ret = rockchip_gem_mmap_buf(obj, vma); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +struct rockchip_gem_object * + rockchip_gem_create_object(struct drm_device *drm, unsigned int size) +{ + struct rockchip_gem_object *rk_obj; + struct drm_gem_object *obj; + int ret; + + size = round_up(size, PAGE_SIZE); + + rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL); + if (!rk_obj) + return ERR_PTR(-ENOMEM); + + obj = &rk_obj->base; + + drm_gem_private_object_init(drm, obj, size); + + ret = rockchip_gem_alloc_buf(rk_obj); + if (ret) + goto err_free_rk_obj; + + return rk_obj; + +err_free_rk_obj: + kfree(rk_obj); + return ERR_PTR(ret); +} + +/* + * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback + * function + */ +void rockchip_gem_free_object(struct drm_gem_object *obj) +{ + struct rockchip_gem_object *rk_obj; + + drm_gem_free_mmap_offset(obj); + + rk_obj = to_rockchip_obj(obj); + + rockchip_gem_free_buf(rk_obj); + + kfree(rk_obj); +} + +/* + * rockchip_gem_create_with_handle - allocate an object with the given + * size and create a gem handle on it + * + * returns a struct rockchip_gem_object* on success or ERR_PTR values + * on failure. 
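rockchip_gem_create_object() above rounds every request up to a page multiple before the DMA allocation, and rockchip_gem_mmap_buf() later refuses VMAs larger than the object, so userspace can never map past the backing storage. For a power-of-two alignment such as the page size, round_up is the usual bit trick:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

/* round_up for power-of-two 'align' only */
static size_t round_up_pow2(size_t x, size_t align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(round_up_pow2(1, PAGE_SIZE) == PAGE_SIZE);
        assert(round_up_pow2(PAGE_SIZE, PAGE_SIZE) == PAGE_SIZE);
        assert(round_up_pow2(PAGE_SIZE + 1, PAGE_SIZE) == 2 * PAGE_SIZE);
        return 0;
}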
+ */ +static struct rockchip_gem_object * +rockchip_gem_create_with_handle(struct drm_file *file_priv, + struct drm_device *drm, unsigned int size, + unsigned int *handle) +{ + struct rockchip_gem_object *rk_obj; + struct drm_gem_object *obj; + int ret; + + rk_obj = rockchip_gem_create_object(drm, size); + if (IS_ERR(rk_obj)) + return ERR_CAST(rk_obj); + + obj = &rk_obj->base; + + /* + * allocate a id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file_priv, obj, handle); + if (ret) + goto err_handle_create; + + /* drop reference from allocate - handle holds it now. */ + drm_gem_object_unreference_unlocked(obj); + + return rk_obj; + +err_handle_create: + rockchip_gem_free_object(obj); + + return ERR_PTR(ret); +} + +int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct drm_gem_object *obj; + int ret; + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(dev, file_priv, handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object.\n"); + ret = -EINVAL; + goto unlock; + } + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto out; + + *offset = drm_vma_node_offset_addr(&obj->vma_node); + DRM_DEBUG_KMS("offset = 0x%llx\n", *offset); + +out: + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* + * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback + * function + * + * This aligns the pitch and size arguments to the minimum required. wrap + * this into your own function if you need bigger alignment. + */ +int rockchip_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct rockchip_gem_object *rk_obj; + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + /* + * align to 64 bytes since Mali requires it. + */ + min_pitch = ALIGN(min_pitch, 64); + + if (args->pitch < min_pitch) + args->pitch = min_pitch; + + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + + rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, + &args->handle); + + return PTR_ERR_OR_ZERO(rk_obj); +} + +/* + * Allocate a sg_table for this GEM object. + * Note: Both the table's contents, and the sg_table itself must be freed by + * the caller. + * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 
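rockchip_gem_dumb_create() above aligns the pitch to 64 bytes (a Mali requirement, per the comment) and only ever rounds the caller's pitch and size up, never down. The arithmetic by itself, for a width that actually produces an unaligned scanline:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned width = 1366, height = 768, bpp = 32;

        unsigned min_pitch = (width * bpp + 7) / 8; /* bytes per scanline  */
        unsigned pitch = ALIGN_UP(min_pitch, 64);   /* 64-byte GPU stride  */
        unsigned size = pitch * height;

        printf("min_pitch=%u pitch=%u size=%u\n", min_pitch, pitch, size);
        return 0;
}

Here min_pitch is 5464, which rounds up to 5504; a caller that passed a larger pitch or size would keep its own values.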
+ */ +struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + struct drm_device *drm = obj->dev; + struct sg_table *sgt; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, + rk_obj->dma_addr, obj->size, + &rk_obj->dma_attrs); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + kfree(sgt); + return ERR_PTR(ret); + } + + return sgt; +} + +void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + + return rk_obj->kvaddr; +} + +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + /* Nothing to do */ +} diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h new file mode 100644 index 000000000000..67bcebe90003 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ROCKCHIP_DRM_GEM_H +#define _ROCKCHIP_DRM_GEM_H + +#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base) + +struct rockchip_gem_object { + struct drm_gem_object base; + unsigned int flags; + + void *kvaddr; + dma_addr_t dma_addr; + struct dma_attrs dma_attrs; +}; + +struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size, + struct sg_table *sgt); +void *rockchip_gem_prime_vmap(struct drm_gem_object *obj); +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +/* drm driver mmap file operations */ +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +/* mmap a gem object to userspace. */ +int rockchip_gem_mmap_buf(struct drm_gem_object *obj, + struct vm_area_struct *vma); + +struct rockchip_gem_object * + rockchip_gem_create_object(struct drm_device *drm, unsigned int size); + +void rockchip_gem_free_object(struct drm_gem_object *obj); + +int rockchip_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); +#endif /* _ROCKCHIP_DRM_GEM_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c new file mode 100644 index 000000000000..e7ca25b3fb38 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -0,0 +1,1455 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
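The VOP driver that follows programs the display controller through {offset, mask, shift} descriptors (the VOP_REG()/REG_SET() macros below) instead of open-coded register math, and struct vop keeps a shadow copy of the register file (regsbak). The exact write path is not shown in this excerpt; presumably each masked write updates the shadow and the hardware together, roughly like this userspace model, where a plain array stands in for the MMIO block:

#include <stdint.h>
#include <stdio.h>

struct vop_reg { uint32_t offset, mask, shift; }; /* offset in words here */

static uint32_t regs[16];    /* stands in for the MMIO block */
static uint32_t regsbak[16]; /* shadow copy, as in struct vop */

static void vop_mask_write(const struct vop_reg *r, uint32_t v)
{
        uint32_t mask = r->mask << r->shift;
        uint32_t val  = (v << r->shift) & mask;

        /* read from the shadow, not the hardware, then write both */
        regsbak[r->offset] = (regsbak[r->offset] & ~mask) | val;
        regs[r->offset]    = regsbak[r->offset];
}

int main(void)
{
        const struct vop_reg out_mode = { .offset = 1, .mask = 0xf, .shift = 0 };
        const struct vop_reg pin_pol  = { .offset = 1, .mask = 0xf, .shift = 4 };

        vop_mask_write(&out_mode, 0xa);
        vop_mask_write(&pin_pol, 0x5);        /* does not disturb out_mode */
        printf("reg[1] = 0x%08x\n", regs[1]); /* 0x0000005a */
        return 0;
}

Reading back from a shadow rather than the hardware avoids MMIO reads on the hot path and keeps write-only registers usable.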
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <drm/drm.h> +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/component.h> + +#include <linux/reset.h> +#include <linux/delay.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_gem.h" +#include "rockchip_drm_fb.h" +#include "rockchip_drm_vop.h" + +#define VOP_REG(off, _mask, s) \ + {.offset = off, \ + .mask = _mask, \ + .shift = s,} + +#define __REG_SET_RELAXED(x, off, mask, shift, v) \ + vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift) +#define __REG_SET_NORMAL(x, off, mask, shift, v) \ + vop_mask_write(x, off, (mask) << shift, (v) << shift) + +#define REG_SET(x, base, reg, v, mode) \ + __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) + +#define VOP_WIN_SET(x, win, name, v) \ + REG_SET(x, win->base, win->phy->name, v, RELAXED) +#define VOP_CTRL_SET(x, name, v) \ + REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL) + +#define VOP_WIN_GET(x, win, name) \ + vop_read_reg(x, win->base, &win->phy->name) + +#define VOP_WIN_GET_YRGBADDR(vop, win) \ + vop_readl(vop, win->base + win->phy->yrgb_mst.offset) + +#define to_vop(x) container_of(x, struct vop, crtc) +#define to_vop_win(x) container_of(x, struct vop_win, base) + +struct vop_win_state { + struct list_head head; + struct drm_framebuffer *fb; + dma_addr_t yrgb_mst; + struct drm_pending_vblank_event *event; +}; + +struct vop_win { + struct drm_plane base; + const struct vop_win_data *data; + struct vop *vop; + + struct list_head pending; + struct vop_win_state *active; +}; + +struct vop { + struct drm_crtc crtc; + struct device *dev; + struct drm_device *drm_dev; + unsigned int dpms; + + int connector_type; + int connector_out_mode; + + /* mutex vsync_ work */ + struct mutex vsync_mutex; + bool vsync_work_pending; + + const struct vop_data *data; + + uint32_t *regsbak; + void __iomem *regs; + + /* physical map length of vop register */ + uint32_t len; + + /* one time only one process allowed to config the register */ + spinlock_t reg_lock; + /* lock vop irq reg */ + spinlock_t irq_lock; + + unsigned int irq; + + /* vop AHP clk */ + struct clk *hclk; + /* vop dclk */ + struct clk *dclk; + /* vop share memory frequency */ + struct clk *aclk; + + /* vop dclk reset */ + struct reset_control *dclk_rst; + + int pipe; + + struct vop_win win[]; +}; + +enum vop_data_format { + VOP_FMT_ARGB8888 = 0, + VOP_FMT_RGB888, + VOP_FMT_RGB565, + VOP_FMT_YUV420SP = 4, + VOP_FMT_YUV422SP, + VOP_FMT_YUV444SP, +}; + +struct vop_reg_data { + uint32_t offset; + uint32_t value; +}; + +struct vop_reg { + uint32_t offset; + uint32_t shift; + uint32_t mask; +}; + +struct vop_ctrl { + struct vop_reg standby; + struct vop_reg data_blank; + struct vop_reg gate_en; + struct vop_reg mmu_en; + struct vop_reg rgb_en; + struct vop_reg edp_en; + struct vop_reg hdmi_en; + struct vop_reg mipi_en; + struct vop_reg out_mode; + struct vop_reg dither_down; + struct vop_reg dither_up; + struct vop_reg pin_pol; + + struct vop_reg htotal_pw; + struct vop_reg hact_st_end; + struct vop_reg vtotal_pw; + struct vop_reg vact_st_end; + struct 
vop_reg hpost_st_end; + struct vop_reg vpost_st_end; +}; + +struct vop_win_phy { + const uint32_t *data_formats; + uint32_t nformats; + + struct vop_reg enable; + struct vop_reg format; + struct vop_reg act_info; + struct vop_reg dsp_info; + struct vop_reg dsp_st; + struct vop_reg yrgb_mst; + struct vop_reg uv_mst; + struct vop_reg yrgb_vir; + struct vop_reg uv_vir; + + struct vop_reg dst_alpha_ctl; + struct vop_reg src_alpha_ctl; +}; + +struct vop_win_data { + uint32_t base; + const struct vop_win_phy *phy; + enum drm_plane_type type; +}; + +struct vop_data { + const struct vop_reg_data *init_table; + unsigned int table_size; + const struct vop_ctrl *ctrl; + const struct vop_win_data *win; + unsigned int win_size; +}; + +static const uint32_t formats_01[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_RGB565, + DRM_FORMAT_NV12, + DRM_FORMAT_NV16, + DRM_FORMAT_NV24, +}; + +static const uint32_t formats_234[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_RGB565, +}; + +static const struct vop_win_phy win01_data = { + .data_formats = formats_01, + .nformats = ARRAY_SIZE(formats_01), + .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), + .format = VOP_REG(WIN0_CTRL0, 0x7, 1), + .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0), + .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16), + .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0), + .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0), +}; + +static const struct vop_win_phy win23_data = { + .data_formats = formats_234, + .nformats = ARRAY_SIZE(formats_234), + .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), + .format = VOP_REG(WIN2_CTRL0, 0x7, 1), + .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), + .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), + .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0), + .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0), + .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), +}; + +static const struct vop_win_phy cursor_data = { + .data_formats = formats_234, + .nformats = ARRAY_SIZE(formats_234), + .enable = VOP_REG(HWC_CTRL0, 0x1, 0), + .format = VOP_REG(HWC_CTRL0, 0x7, 1), + .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0), +}; + +static const struct vop_ctrl ctrl_data = { + .standby = VOP_REG(SYS_CTRL, 0x1, 22), + .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), + .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20), + .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12), + .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13), + .edp_en = VOP_REG(SYS_CTRL, 0x1, 14), + .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15), + .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1), + .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6), + .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19), + .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0), + .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4), + .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0), + .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0), + .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0), + .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0), + .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0), + .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0), +}; + +static const struct vop_reg_data vop_init_reg_table[] = { + {SYS_CTRL, 0x00c00000}, + {DSP_CTRL0, 
0x00000000}, + {WIN0_CTRL0, 0x00000080}, + {WIN1_CTRL0, 0x00000080}, +}; + +/* + * Note: rk3288 has a dedicated 'cursor' window, however, that window requires + * special support to get alpha blending working. For now, just use overlay + * window 1 for the drm cursor. + */ +static const struct vop_win_data rk3288_vop_win_data[] = { + { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, + { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY }, +}; + +static const struct vop_data rk3288_vop = { + .init_table = vop_init_reg_table, + .table_size = ARRAY_SIZE(vop_init_reg_table), + .ctrl = &ctrl_data, + .win = rk3288_vop_win_data, + .win_size = ARRAY_SIZE(rk3288_vop_win_data), +}; + +static const struct of_device_id vop_driver_dt_match[] = { + { .compatible = "rockchip,rk3288-vop", + .data = &rk3288_vop }, + {}, +}; + +static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) +{ + writel(v, vop->regs + offset); + vop->regsbak[offset >> 2] = v; +} + +static inline uint32_t vop_readl(struct vop *vop, uint32_t offset) +{ + return readl(vop->regs + offset); +} + +static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, + const struct vop_reg *reg) +{ + return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask; +} + +static inline void vop_cfg_done(struct vop *vop) +{ + writel(0x01, vop->regs + REG_CFG_DONE); +} + +static inline void vop_mask_write(struct vop *vop, uint32_t offset, + uint32_t mask, uint32_t v) +{ + if (mask) { + uint32_t cached_val = vop->regsbak[offset >> 2]; + + cached_val = (cached_val & ~mask) | v; + writel(cached_val, vop->regs + offset); + vop->regsbak[offset >> 2] = cached_val; + } +} + +static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, + uint32_t mask, uint32_t v) +{ + if (mask) { + uint32_t cached_val = vop->regsbak[offset >> 2]; + + cached_val = (cached_val & ~mask) | v; + writel_relaxed(cached_val, vop->regs + offset); + vop->regsbak[offset >> 2] = cached_val; + } +} + +static enum vop_data_format vop_convert_format(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return VOP_FMT_ARGB8888; + case DRM_FORMAT_RGB888: + return VOP_FMT_RGB888; + case DRM_FORMAT_RGB565: + return VOP_FMT_RGB565; + case DRM_FORMAT_NV12: + return VOP_FMT_YUV420SP; + case DRM_FORMAT_NV16: + return VOP_FMT_YUV422SP; + case DRM_FORMAT_NV24: + return VOP_FMT_YUV444SP; + default: + DRM_ERROR("unsupport format[%08x]\n", format); + return -EINVAL; + } +} + +static bool is_alpha_support(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_ARGB8888: + return true; + default: + return false; + } +} + +static void vop_enable(struct drm_crtc *crtc) +{ + struct vop *vop = to_vop(crtc); + int ret; + + ret = clk_enable(vop->hclk); + if (ret < 0) { + dev_err(vop->dev, "failed to enable hclk - %d\n", ret); + return; + } + + ret = clk_enable(vop->dclk); + if (ret < 0) { + dev_err(vop->dev, "failed to enable dclk - %d\n", ret); + goto err_disable_hclk; + } + + ret = clk_enable(vop->aclk); + if (ret < 0) { + dev_err(vop->dev, "failed to enable aclk - %d\n", ret); + goto err_disable_dclk; + } + + /* + * Slave iommu shares power, irq and clock with vop. It was associated + * automatically with this master device via common driver code. 
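 *
 * [Editorial aside, not part of the patch: this function and
 *  vop_disable() below are where the shadow-register macros earlier in
 *  the file pay off. For example, with
 *  .standby = VOP_REG(SYS_CTRL, 0x1, 22), the VOP_CTRL_SET(vop,
 *  standby, 0) call just below expands through REG_SET and
 *  __REG_SET_NORMAL to
 *
 *	vop_mask_write(vop, SYS_CTRL, 0x1 << 22, 0 << 22);
 *
 *  a read-modify-write against the regsbak shadow copy, so the other
 *  bit fields packed into SYS_CTRL are preserved without having to
 *  read the register back from hardware.]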
+ * Now that we have enabled the clock we attach it to the shared drm + * mapping. + */ + ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev); + if (ret) { + dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret); + goto err_disable_aclk; + } + + spin_lock(&vop->reg_lock); + + VOP_CTRL_SET(vop, standby, 0); + + spin_unlock(&vop->reg_lock); + + enable_irq(vop->irq); + + drm_vblank_on(vop->drm_dev, vop->pipe); + + return; + +err_disable_aclk: + clk_disable(vop->aclk); +err_disable_dclk: + clk_disable(vop->dclk); +err_disable_hclk: + clk_disable(vop->hclk); +} + +static void vop_disable(struct drm_crtc *crtc) +{ + struct vop *vop = to_vop(crtc); + + drm_vblank_off(crtc->dev, vop->pipe); + + disable_irq(vop->irq); + + /* + * TODO: Since standby doesn't take effect until the next vblank, + * when we turn off dclk below, the vop is probably still active. + */ + spin_lock(&vop->reg_lock); + + VOP_CTRL_SET(vop, standby, 1); + + spin_unlock(&vop->reg_lock); + /* + * disable dclk to stop frame scan, so we can safely detach iommu, + */ + clk_disable(vop->dclk); + + rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); + + clk_disable(vop->aclk); + clk_disable(vop->hclk); +} + +/* + * Caller must hold vsync_mutex. + */ +static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win) +{ + struct vop_win_state *last; + struct vop_win_state *active = vop_win->active; + + if (list_empty(&vop_win->pending)) + return active ? active->fb : NULL; + + last = list_last_entry(&vop_win->pending, struct vop_win_state, head); + return last ? last->fb : NULL; +} + +/* + * Caller must hold vsync_mutex. + */ +static int vop_win_queue_fb(struct vop_win *vop_win, + struct drm_framebuffer *fb, dma_addr_t yrgb_mst, + struct drm_pending_vblank_event *event) +{ + struct vop_win_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->fb = fb; + state->yrgb_mst = yrgb_mst; + state->event = event; + + list_add_tail(&state->head, &vop_win->pending); + + return 0; +} + +static int vop_update_plane_event(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, + int crtc_y, unsigned int crtc_w, + unsigned int crtc_h, uint32_t src_x, + uint32_t src_y, uint32_t src_w, + uint32_t src_h, + struct drm_pending_vblank_event *event) +{ + struct vop_win *vop_win = to_vop_win(plane); + const struct vop_win_data *win = vop_win->data; + struct vop *vop = to_vop(crtc); + struct drm_gem_object *obj; + struct rockchip_gem_object *rk_obj; + unsigned long offset; + unsigned int actual_w; + unsigned int actual_h; + unsigned int dsp_stx; + unsigned int dsp_sty; + unsigned int y_vir_stride; + dma_addr_t yrgb_mst; + enum vop_data_format format; + uint32_t val; + bool is_alpha; + bool visible; + int ret; + struct drm_rect dest = { + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + struct drm_rect src = { + /* 16.16 fixed point */ + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + const struct drm_rect clip = { + .x2 = crtc->mode.hdisplay, + .y2 = crtc->mode.vdisplay, + }; + bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + can_position, false, &visible); + if (ret) + return ret; + + if (!visible) + return 0; + + is_alpha = is_alpha_support(fb->pixel_format); + format = vop_convert_format(fb->pixel_format); + if 
(format < 0) + return format; + + obj = rockchip_fb_get_gem_obj(fb, 0); + if (!obj) { + DRM_ERROR("fail to get rockchip gem object from framebuffer\n"); + return -EINVAL; + } + + rk_obj = to_rockchip_obj(obj); + + actual_w = (src.x2 - src.x1) >> 16; + actual_h = (src.y2 - src.y1) >> 16; + crtc_x = max(0, crtc_x); + crtc_y = max(0, crtc_y); + + dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start; + dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start; + + offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3); + offset += (src.y1 >> 16) * fb->pitches[0]; + yrgb_mst = rk_obj->dma_addr + offset; + + y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3); + + /* + * If this plane update changes the plane's framebuffer, (or more + * precisely, if this update has a different framebuffer than the last + * update), enqueue it so we can track when it completes. + * + * Only when we discover that this update has completed, can we + * unreference any previous framebuffers. + */ + mutex_lock(&vop->vsync_mutex); + if (fb != vop_win_last_pending_fb(vop_win)) { + ret = drm_vblank_get(plane->dev, vop->pipe); + if (ret) { + DRM_ERROR("failed to get vblank, %d\n", ret); + mutex_unlock(&vop->vsync_mutex); + return ret; + } + + drm_framebuffer_reference(fb); + + ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event); + if (ret) { + drm_vblank_put(plane->dev, vop->pipe); + mutex_unlock(&vop->vsync_mutex); + return ret; + } + + vop->vsync_work_pending = true; + } + mutex_unlock(&vop->vsync_mutex); + + spin_lock(&vop->reg_lock); + + VOP_WIN_SET(vop, win, format, format); + VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride); + VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst); + val = (actual_h - 1) << 16; + val |= (actual_w - 1) & 0xffff; + VOP_WIN_SET(vop, win, act_info, val); + VOP_WIN_SET(vop, win, dsp_info, val); + val = (dsp_sty - 1) << 16; + val |= (dsp_stx - 1) & 0xffff; + VOP_WIN_SET(vop, win, dsp_st, val); + + if (is_alpha) { + VOP_WIN_SET(vop, win, dst_alpha_ctl, + DST_FACTOR_M0(ALPHA_SRC_INVERSE)); + val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | + SRC_ALPHA_M0(ALPHA_STRAIGHT) | + SRC_BLEND_M0(ALPHA_PER_PIX) | + SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) | + SRC_FACTOR_M0(ALPHA_ONE); + VOP_WIN_SET(vop, win, src_alpha_ctl, val); + } else { + VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0)); + } + + VOP_WIN_SET(vop, win, enable, 1); + + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); + + return 0; +} + +static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, uint32_t src_w, + uint32_t src_h) +{ + return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w, + crtc_h, src_x, src_y, src_w, src_h, + NULL); +} + +static int vop_update_primary_plane(struct drm_crtc *crtc, + struct drm_pending_vblank_event *event) +{ + unsigned int crtc_w, crtc_h; + + crtc_w = crtc->primary->fb->width - crtc->x; + crtc_h = crtc->primary->fb->height - crtc->y; + + return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb, + 0, 0, crtc_w, crtc_h, crtc->x << 16, + crtc->y << 16, crtc_w << 16, + crtc_h << 16, event); +} + +static int vop_disable_plane(struct drm_plane *plane) +{ + struct vop_win *vop_win = to_vop_win(plane); + const struct vop_win_data *win = vop_win->data; + struct vop *vop; + int ret; + + if (!plane->crtc) + return 0; + + vop = to_vop(plane->crtc); + + ret = drm_vblank_get(plane->dev, vop->pipe); + if (ret) { + 
DRM_ERROR("failed to get vblank, %d\n", ret); + return ret; + } + + mutex_lock(&vop->vsync_mutex); + + ret = vop_win_queue_fb(vop_win, NULL, 0, NULL); + if (ret) { + drm_vblank_put(plane->dev, vop->pipe); + mutex_unlock(&vop->vsync_mutex); + return ret; + } + + vop->vsync_work_pending = true; + mutex_unlock(&vop->vsync_mutex); + + spin_lock(&vop->reg_lock); + VOP_WIN_SET(vop, win, enable, 0); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); + + return 0; +} + +static void vop_plane_destroy(struct drm_plane *plane) +{ + vop_disable_plane(plane); + drm_plane_cleanup(plane); +} + +static const struct drm_plane_funcs vop_plane_funcs = { + .update_plane = vop_update_plane, + .disable_plane = vop_disable_plane, + .destroy = vop_plane_destroy, +}; + +int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, + int connector_type, + int out_mode) +{ + struct vop *vop = to_vop(crtc); + + vop->connector_type = connector_type; + vop->connector_out_mode = out_mode; + + return 0; +} + +static int vop_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct vop *vop = to_vop(crtc); + unsigned long flags; + + if (vop->dpms != DRM_MODE_DPMS_ON) + return -EPERM; + + spin_lock_irqsave(&vop->irq_lock, flags); + + vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1)); + + spin_unlock_irqrestore(&vop->irq_lock, flags); + + return 0; +} + +static void vop_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct vop *vop = to_vop(crtc); + unsigned long flags; + + if (vop->dpms != DRM_MODE_DPMS_ON) + return; + spin_lock_irqsave(&vop->irq_lock, flags); + vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0)); + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + +static const struct rockchip_crtc_funcs private_crtc_funcs = { + .enable_vblank = vop_crtc_enable_vblank, + .disable_vblank = vop_crtc_disable_vblank, +}; + +static void vop_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct vop *vop = to_vop(crtc); + + DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); + + if (vop->dpms == mode) { + DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); + return; + } + + switch (mode) { + case DRM_MODE_DPMS_ON: + vop_enable(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + vop_disable(crtc); + break; + default: + DRM_DEBUG_KMS("unspecified mode %d\n", mode); + break; + } + + vop->dpms = mode; +} + +static void vop_crtc_prepare(struct drm_crtc *crtc) +{ + vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0) + return false; + + return true; +} + +static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + int ret; + + crtc->x = x; + crtc->y = y; + + ret = vop_update_primary_plane(crtc, NULL); + if (ret < 0) { + DRM_ERROR("fail to update plane\n"); + return ret; + } + + return 0; +} + +static int vop_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *fb) +{ + struct vop *vop = to_vop(crtc); + u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; + u16 hdisplay = adjusted_mode->hdisplay; + u16 htotal = adjusted_mode->htotal; + u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start; + u16 hact_end = hact_st + hdisplay; + u16 vdisplay = adjusted_mode->vdisplay; + u16 vtotal = 
adjusted_mode->vtotal; + u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; + u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; + u16 vact_end = vact_st + vdisplay; + int ret; + uint32_t val; + + /* + * disable dclk to stop frame scan, so that we can safely configure the + * mode and enable the iommu. + */ + clk_disable(vop->dclk); + + switch (vop->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + VOP_CTRL_SET(vop, rgb_en, 1); + break; + case DRM_MODE_CONNECTOR_eDP: + VOP_CTRL_SET(vop, edp_en, 1); + break; + case DRM_MODE_CONNECTOR_HDMIA: + VOP_CTRL_SET(vop, hdmi_en, 1); + break; + default: + DRM_ERROR("unsupported connector_type[%d]\n", + vop->connector_type); + return -EINVAL; + } + VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode); + + val = 0x8; + val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0; + val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0; + VOP_CTRL_SET(vop, pin_pol, val); + + VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); + val = hact_st << 16; + val |= hact_end; + VOP_CTRL_SET(vop, hact_st_end, val); + VOP_CTRL_SET(vop, hpost_st_end, val); + + VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len); + val = vact_st << 16; + val |= vact_end; + VOP_CTRL_SET(vop, vact_st_end, val); + VOP_CTRL_SET(vop, vpost_st_end, val); + + ret = vop_crtc_mode_set_base(crtc, x, y, fb); + if (ret) + return ret; + + /* + * reset dclk so that the whole mode configuration takes effect and the + * clock starts on a clean frame. + */ + reset_control_assert(vop->dclk_rst); + usleep_range(10, 20); + reset_control_deassert(vop->dclk_rst); + + clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); + ret = clk_enable(vop->dclk); + if (ret < 0) { + dev_err(vop->dev, "failed to enable dclk - %d\n", ret); + return ret; + } + + return 0; +} + +static void vop_crtc_commit(struct drm_crtc *crtc) +{ +} + +static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { + .dpms = vop_crtc_dpms, + .prepare = vop_crtc_prepare, + .mode_fixup = vop_crtc_mode_fixup, + .mode_set = vop_crtc_mode_set, + .mode_set_base = vop_crtc_mode_set_base, + .commit = vop_crtc_commit, +}; + +static int vop_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct vop *vop = to_vop(crtc); + struct drm_framebuffer *old_fb = crtc->primary->fb; + int ret; + + /* when the page flip is requested, crtc's dpms should be on */ + if (vop->dpms > DRM_MODE_DPMS_ON) { + DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms); + return 0; + } + + crtc->primary->fb = fb; + + ret = vop_update_primary_plane(crtc, event); + if (ret) + crtc->primary->fb = old_fb; + + return ret; +} + +static void vop_win_state_complete(struct vop_win *vop_win, + struct vop_win_state *state) +{ + struct vop *vop = vop_win->vop; + struct drm_crtc *crtc = &vop->crtc; + struct drm_device *drm = crtc->dev; + unsigned long flags; + + if (state->event) { + spin_lock_irqsave(&drm->event_lock, flags); + drm_send_vblank_event(drm, -1, state->event); + spin_unlock_irqrestore(&drm->event_lock, flags); + } + + list_del(&state->head); + drm_vblank_put(crtc->dev, vop->pipe); +} + +static void vop_crtc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); +} + +static const struct drm_crtc_funcs vop_crtc_funcs = { + .set_config = drm_crtc_helper_set_config, + .page_flip = vop_crtc_page_flip, + .destroy = vop_crtc_destroy, +}; + +static bool vop_win_state_is_active(struct vop_win *vop_win, + struct vop_win_state *state) +{ + bool 
active = false; + + if (state->fb) { + dma_addr_t yrgb_mst; + + /* check yrgb_mst to tell if pending_fb is now front */ + yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); + + active = (yrgb_mst == state->yrgb_mst); + } else { + bool enabled; + + /* if enable bit is clear, plane is now disabled */ + enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable); + + active = (enabled == 0); + } + + return active; +} + +static void vop_win_state_destroy(struct vop_win_state *state) +{ + struct drm_framebuffer *fb = state->fb; + + if (fb) + drm_framebuffer_unreference(fb); + + kfree(state); +} + +static void vop_win_update_state(struct vop_win *vop_win) +{ + struct vop_win_state *state, *n, *new_active = NULL; + + /* Check if any pending states are now active */ + list_for_each_entry(state, &vop_win->pending, head) + if (vop_win_state_is_active(vop_win, state)) { + new_active = state; + break; + } + + if (!new_active) + return; + + /* + * Destroy any 'skipped' pending states - states that were queued + * before the newly active state. + */ + list_for_each_entry_safe(state, n, &vop_win->pending, head) { + if (state == new_active) + break; + vop_win_state_complete(vop_win, state); + vop_win_state_destroy(state); + } + + vop_win_state_complete(vop_win, new_active); + + if (vop_win->active) + vop_win_state_destroy(vop_win->active); + vop_win->active = new_active; +} + +static bool vop_win_has_pending_state(struct vop_win *vop_win) +{ + return !list_empty(&vop_win->pending); +} + +static irqreturn_t vop_isr_thread(int irq, void *data) +{ + struct vop *vop = data; + const struct vop_data *vop_data = vop->data; + unsigned int i; + + mutex_lock(&vop->vsync_mutex); + + if (!vop->vsync_work_pending) + goto done; + + vop->vsync_work_pending = false; + + for (i = 0; i < vop_data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + + vop_win_update_state(vop_win); + if (vop_win_has_pending_state(vop_win)) + vop->vsync_work_pending = true; + } + +done: + mutex_unlock(&vop->vsync_mutex); + + return IRQ_HANDLED; +} + +static irqreturn_t vop_isr(int irq, void *data) +{ + struct vop *vop = data; + uint32_t intr0_reg, active_irqs; + unsigned long flags; + + /* + * INTR_CTRL0 register has interrupt status, enable and clear bits, we + * must hold irq_lock to avoid a race with enable/disable_vblank(). + */ + spin_lock_irqsave(&vop->irq_lock, flags); + intr0_reg = vop_readl(vop, INTR_CTRL0); + active_irqs = intr0_reg & INTR_MASK; + /* Clear all active interrupt sources */ + if (active_irqs) + vop_writel(vop, INTR_CTRL0, + intr0_reg | (active_irqs << INTR_CLR_SHIFT)); + spin_unlock_irqrestore(&vop->irq_lock, flags); + + /* This is expected for vop iommu irqs, since the irq is shared */ + if (!active_irqs) + return IRQ_NONE; + + /* Only Frame Start Interrupt is enabled; other irqs are spurious. */ + if (!(active_irqs & FS_INTR)) { + DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs); + return IRQ_NONE; + } + + drm_handle_vblank(vop->drm_dev, vop->pipe); + + return (vop->vsync_work_pending) ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; +} + +static int vop_create_crtc(struct vop *vop) +{ + const struct vop_data *vop_data = vop->data; + struct device *dev = vop->dev; + struct drm_device *drm_dev = vop->drm_dev; + struct drm_plane *primary = NULL, *cursor = NULL, *plane; + struct drm_crtc *crtc = &vop->crtc; + struct device_node *port; + int ret; + int i; + + /* + * Create drm_plane for primary and cursor planes first, since we need + * to pass them to drm_crtc_init_with_planes, which sets the + * "possible_crtcs" to the newly initialized crtc. + */ + for (i = 0; i < vop_data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win_data = vop_win->data; + + if (win_data->type != DRM_PLANE_TYPE_PRIMARY && + win_data->type != DRM_PLANE_TYPE_CURSOR) + continue; + + ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, + 0, &vop_plane_funcs, + win_data->phy->data_formats, + win_data->phy->nformats, + win_data->type); + if (ret) { + DRM_ERROR("failed to initialize plane\n"); + goto err_cleanup_planes; + } + + plane = &vop_win->base; + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + primary = plane; + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor = plane; + } + + ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, + &vop_crtc_funcs); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); + + /* + * Create drm_planes for overlay windows with possible_crtcs restricted + * to the newly created crtc. + */ + for (i = 0; i < vop_data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win_data = vop_win->data; + unsigned long possible_crtcs = 1 << drm_crtc_index(crtc); + + if (win_data->type != DRM_PLANE_TYPE_OVERLAY) + continue; + + ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, + possible_crtcs, + &vop_plane_funcs, + win_data->phy->data_formats, + win_data->phy->nformats, + win_data->type); + if (ret) { + DRM_ERROR("failed to initialize overlay plane\n"); + goto err_cleanup_crtc; + } + } + + port = of_get_child_by_name(dev->of_node, "port"); + if (!port) { + DRM_ERROR("no port node found in %s\n", + dev->of_node->full_name); + goto err_cleanup_crtc; + } + + crtc->port = port; + vop->pipe = drm_crtc_index(crtc); + rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe); + + return 0; + +err_cleanup_crtc: + drm_crtc_cleanup(crtc); +err_cleanup_planes: + list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) + drm_plane_cleanup(plane); + return ret; +} + +static void vop_destroy_crtc(struct vop *vop) +{ + struct drm_crtc *crtc = &vop->crtc; + + rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe); + of_node_put(crtc->port); + drm_crtc_cleanup(crtc); +} + +static int vop_initial(struct vop *vop) +{ + const struct vop_data *vop_data = vop->data; + const struct vop_reg_data *init_table = vop_data->init_table; + struct reset_control *ahb_rst; + int i, ret; + + vop->hclk = devm_clk_get(vop->dev, "hclk_vop"); + if (IS_ERR(vop->hclk)) { + dev_err(vop->dev, "failed to get hclk source\n"); + return PTR_ERR(vop->hclk); + } + vop->aclk = devm_clk_get(vop->dev, "aclk_vop"); + if (IS_ERR(vop->aclk)) { + dev_err(vop->dev, "failed to get aclk source\n"); + return PTR_ERR(vop->aclk); + } + vop->dclk = devm_clk_get(vop->dev, "dclk_vop"); + if (IS_ERR(vop->dclk)) { + dev_err(vop->dev, "failed to get dclk source\n"); + return PTR_ERR(vop->dclk); + } + + ret = clk_prepare(vop->hclk); + if (ret < 0) { + dev_err(vop->dev, "failed to prepare hclk\n"); + return 
ret; + } + + ret = clk_prepare(vop->dclk); + if (ret < 0) { + dev_err(vop->dev, "failed to prepare dclk\n"); + goto err_unprepare_hclk; + } + + ret = clk_prepare(vop->aclk); + if (ret < 0) { + dev_err(vop->dev, "failed to prepare aclk\n"); + goto err_unprepare_dclk; + } + + /* + * enable hclk, so that we can configure the vop registers. + */ + ret = clk_enable(vop->hclk); + if (ret < 0) { + dev_err(vop->dev, "failed to enable hclk\n"); + goto err_unprepare_aclk; + } + /* + * do the ahb reset, which resets all vop registers. + */ + ahb_rst = devm_reset_control_get(vop->dev, "ahb"); + if (IS_ERR(ahb_rst)) { + dev_err(vop->dev, "failed to get ahb reset\n"); + ret = PTR_ERR(ahb_rst); + goto err_disable_hclk; + } + reset_control_assert(ahb_rst); + usleep_range(10, 20); + reset_control_deassert(ahb_rst); + + memcpy(vop->regsbak, vop->regs, vop->len); + + for (i = 0; i < vop_data->table_size; i++) + vop_writel(vop, init_table[i].offset, init_table[i].value); + + for (i = 0; i < vop_data->win_size; i++) { + const struct vop_win_data *win = &vop_data->win[i]; + + VOP_WIN_SET(vop, win, enable, 0); + } + + vop_cfg_done(vop); + + /* + * do the dclk reset so that all of the above configuration takes + * effect. + */ + vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk"); + if (IS_ERR(vop->dclk_rst)) { + dev_err(vop->dev, "failed to get dclk reset\n"); + ret = PTR_ERR(vop->dclk_rst); + goto err_disable_hclk; + } + reset_control_assert(vop->dclk_rst); + usleep_range(10, 20); + reset_control_deassert(vop->dclk_rst); + + clk_disable(vop->hclk); + + vop->dpms = DRM_MODE_DPMS_OFF; + + return 0; + +err_disable_hclk: + clk_disable(vop->hclk); +err_unprepare_aclk: + clk_unprepare(vop->aclk); +err_unprepare_dclk: + clk_unprepare(vop->dclk); +err_unprepare_hclk: + clk_unprepare(vop->hclk); + return ret; +} + +/* + * Initialize the vop->win array elements. + */
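/*
 * Editorial sketch, not part of the patch: the clk framework split used
 * by vop_initial()/vop_enable()/vop_disable() above, assuming only the
 * standard <linux/clk.h> API.
 */
static int example_clk_lifecycle(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep: once, in vop_initial() */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* atomic-safe: each DPMS on, vop_enable() */
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	clk_disable(clk);		/* each DPMS off, vop_disable() */
	clk_unprepare(clk);		/* driver teardown */
	return 0;
}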
+static void vop_win_init(struct vop *vop) +{ + const struct vop_data *vop_data = vop->data; + unsigned int i; + + for (i = 0; i < vop_data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win_data = &vop_data->win[i]; + + vop_win->data = win_data; + vop_win->vop = vop; + INIT_LIST_HEAD(&vop_win->pending); + } +} + +static int vop_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + const struct of_device_id *of_id; + const struct vop_data *vop_data; + struct drm_device *drm_dev = data; + struct vop *vop; + struct resource *res; + size_t alloc_size; + int ret; + + of_id = of_match_device(vop_driver_dt_match, dev); + vop_data = of_id->data; + if (!vop_data) + return -ENODEV; + + /* Allocate vop struct and its vop_win array */ + alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size; + vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL); + if (!vop) + return -ENOMEM; + + vop->dev = dev; + vop->data = vop_data; + vop->drm_dev = drm_dev; + dev_set_drvdata(dev, vop); + + vop_win_init(vop); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + vop->len = resource_size(res); + vop->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(vop->regs)) + return PTR_ERR(vop->regs); + + vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); + if (!vop->regsbak) + return -ENOMEM; + + ret = vop_initial(vop); + if (ret < 0) { + dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret); + return ret; + } + + vop->irq = platform_get_irq(pdev, 0); + if (vop->irq < 0) { + dev_err(dev, "cannot find irq for vop\n"); + return vop->irq; + } + + spin_lock_init(&vop->reg_lock); + spin_lock_init(&vop->irq_lock); + + mutex_init(&vop->vsync_mutex); + + ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread, + IRQF_SHARED, dev_name(dev), vop); + if (ret) + return ret; + + /* IRQ is initially disabled; it gets enabled in vop_enable() */ + disable_irq(vop->irq); + + ret = vop_create_crtc(vop); + if (ret) + return ret; + + pm_runtime_enable(&pdev->dev); + return 0; +} + +static void vop_unbind(struct device *dev, struct device *master, void *data) +{ + struct vop *vop = dev_get_drvdata(dev); + + pm_runtime_disable(dev); + vop_destroy_crtc(vop); +} + +static const struct component_ops vop_component_ops = { + .bind = vop_bind, + .unbind = vop_unbind, +}; + +static int vop_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (!dev->of_node) { + dev_err(dev, "can't find vop devices\n"); + return -ENODEV; + } + + return component_add(dev, &vop_component_ops); +} + +static int vop_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &vop_component_ops); + + return 0; +} + +struct platform_driver vop_platform_driver = { + .probe = vop_probe, + .remove = vop_remove, + .driver = { + .name = "rockchip-vop", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(vop_driver_dt_match), + }, +}; + +module_platform_driver(vop_platform_driver); + +MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>"); +MODULE_DESCRIPTION("ROCKCHIP VOP Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h new file mode 100644 index 000000000000..63e9b3a084c5 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -0,0 +1,201 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author:Mark Yao <mark.yao@rock-chips.com> + * + * This software is licensed under the terms 
of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ROCKCHIP_DRM_VOP_H +#define _ROCKCHIP_DRM_VOP_H + +/* register definition */ +#define REG_CFG_DONE 0x0000 +#define VERSION_INFO 0x0004 +#define SYS_CTRL 0x0008 +#define SYS_CTRL1 0x000c +#define DSP_CTRL0 0x0010 +#define DSP_CTRL1 0x0014 +#define DSP_BG 0x0018 +#define MCU_CTRL 0x001c +#define INTR_CTRL0 0x0020 +#define INTR_CTRL1 0x0024 +#define WIN0_CTRL0 0x0030 +#define WIN0_CTRL1 0x0034 +#define WIN0_COLOR_KEY 0x0038 +#define WIN0_VIR 0x003c +#define WIN0_YRGB_MST 0x0040 +#define WIN0_CBR_MST 0x0044 +#define WIN0_ACT_INFO 0x0048 +#define WIN0_DSP_INFO 0x004c +#define WIN0_DSP_ST 0x0050 +#define WIN0_SCL_FACTOR_YRGB 0x0054 +#define WIN0_SCL_FACTOR_CBR 0x0058 +#define WIN0_SCL_OFFSET 0x005c +#define WIN0_SRC_ALPHA_CTRL 0x0060 +#define WIN0_DST_ALPHA_CTRL 0x0064 +#define WIN0_FADING_CTRL 0x0068 +/* win1 register */ +#define WIN1_CTRL0 0x0070 +#define WIN1_CTRL1 0x0074 +#define WIN1_COLOR_KEY 0x0078 +#define WIN1_VIR 0x007c +#define WIN1_YRGB_MST 0x0080 +#define WIN1_CBR_MST 0x0084 +#define WIN1_ACT_INFO 0x0088 +#define WIN1_DSP_INFO 0x008c +#define WIN1_DSP_ST 0x0090 +#define WIN1_SCL_FACTOR_YRGB 0x0094 +#define WIN1_SCL_FACTOR_CBR 0x0098 +#define WIN1_SCL_OFFSET 0x009c +#define WIN1_SRC_ALPHA_CTRL 0x00a0 +#define WIN1_DST_ALPHA_CTRL 0x00a4 +#define WIN1_FADING_CTRL 0x00a8 +/* win2 register */ +#define WIN2_CTRL0 0x00b0 +#define WIN2_CTRL1 0x00b4 +#define WIN2_VIR0_1 0x00b8 +#define WIN2_VIR2_3 0x00bc +#define WIN2_MST0 0x00c0 +#define WIN2_DSP_INFO0 0x00c4 +#define WIN2_DSP_ST0 0x00c8 +#define WIN2_COLOR_KEY 0x00cc +#define WIN2_MST1 0x00d0 +#define WIN2_DSP_INFO1 0x00d4 +#define WIN2_DSP_ST1 0x00d8 +#define WIN2_SRC_ALPHA_CTRL 0x00dc +#define WIN2_MST2 0x00e0 +#define WIN2_DSP_INFO2 0x00e4 +#define WIN2_DSP_ST2 0x00e8 +#define WIN2_DST_ALPHA_CTRL 0x00ec +#define WIN2_MST3 0x00f0 +#define WIN2_DSP_INFO3 0x00f4 +#define WIN2_DSP_ST3 0x00f8 +#define WIN2_FADING_CTRL 0x00fc +/* win3 register */ +#define WIN3_CTRL0 0x0100 +#define WIN3_CTRL1 0x0104 +#define WIN3_VIR0_1 0x0108 +#define WIN3_VIR2_3 0x010c +#define WIN3_MST0 0x0110 +#define WIN3_DSP_INFO0 0x0114 +#define WIN3_DSP_ST0 0x0118 +#define WIN3_COLOR_KEY 0x011c +#define WIN3_MST1 0x0120 +#define WIN3_DSP_INFO1 0x0124 +#define WIN3_DSP_ST1 0x0128 +#define WIN3_SRC_ALPHA_CTRL 0x012c +#define WIN3_MST2 0x0130 +#define WIN3_DSP_INFO2 0x0134 +#define WIN3_DSP_ST2 0x0138 +#define WIN3_DST_ALPHA_CTRL 0x013c +#define WIN3_MST3 0x0140 +#define WIN3_DSP_INFO3 0x0144 +#define WIN3_DSP_ST3 0x0148 +#define WIN3_FADING_CTRL 0x014c +/* hwc register */ +#define HWC_CTRL0 0x0150 +#define HWC_CTRL1 0x0154 +#define HWC_MST 0x0158 +#define HWC_DSP_ST 0x015c +#define HWC_SRC_ALPHA_CTRL 0x0160 +#define HWC_DST_ALPHA_CTRL 0x0164 +#define HWC_FADING_CTRL 0x0168 +/* post process register */ +#define POST_DSP_HACT_INFO 0x0170 +#define POST_DSP_VACT_INFO 0x0174 +#define POST_SCL_FACTOR_YRGB 0x0178 +#define POST_SCL_CTRL 0x0180 +#define POST_DSP_VACT_INFO_F1 0x0184 +#define DSP_HTOTAL_HS_END 0x0188 +#define DSP_HACT_ST_END 0x018c +#define DSP_VTOTAL_VS_END 0x0190 +#define DSP_VACT_ST_END 0x0194 +#define DSP_VS_ST_END_F1 0x0198 +#define 
DSP_VACT_ST_END_F1 0x019c +/* register definition end */ + +/* interrupt define */ +#define DSP_HOLD_VALID_INTR (1 << 0) +#define FS_INTR (1 << 1) +#define LINE_FLAG_INTR (1 << 2) +#define BUS_ERROR_INTR (1 << 3) + +#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \ + LINE_FLAG_INTR | BUS_ERROR_INTR) + +#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4) +#define FS_INTR_EN(x) ((x) << 5) +#define LINE_FLAG_INTR_EN(x) ((x) << 6) +#define BUS_ERROR_INTR_EN(x) ((x) << 7) +#define DSP_HOLD_VALID_INTR_MASK (1 << 4) +#define FS_INTR_MASK (1 << 5) +#define LINE_FLAG_INTR_MASK (1 << 6) +#define BUS_ERROR_INTR_MASK (1 << 7) + +#define INTR_CLR_SHIFT 8 +#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0)) +#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1)) +#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2)) +#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3)) + +#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12) +#define DSP_LINE_NUM_MASK (0x1fff << 12) + +/* src alpha ctrl define */ +#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24) +#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16) +#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6) +#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5) +#define SRC_BLEND_M0(x) (((x) & 0x3) << 3) +#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2) +#define SRC_COLOR_M0(x) (((x) & 0x1) << 1) +#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0) +/* dst alpha ctrl define */ +#define DST_FACTOR_M0(x) (((x) & 0x7) << 6) + +/* + * display output interface supported by rockchip lcdc + */ +#define ROCKCHIP_OUT_MODE_P888 0 +#define ROCKCHIP_OUT_MODE_P666 1 +#define ROCKCHIP_OUT_MODE_P565 2 +/* for use special outface */ +#define ROCKCHIP_OUT_MODE_AAAA 15 + +enum alpha_mode { + ALPHA_STRAIGHT, + ALPHA_INVERSE, +}; + +enum global_blend_mode { + ALPHA_GLOBAL, + ALPHA_PER_PIX, + ALPHA_PER_PIX_GLOBAL, +}; + +enum alpha_cal_mode { + ALPHA_SATURATION, + ALPHA_NO_SATURATION, +}; + +enum color_mode { + ALPHA_SRC_PRE_MUL, + ALPHA_SRC_NO_PRE_MUL, +}; + +enum factor_mode { + ALPHA_ZERO, + ALPHA_ONE, + ALPHA_SRC, + ALPHA_SRC_INVERSE, + ALPHA_SRC_GLOBAL, +}; + +#endif /* _ROCKCHIP_DRM_VOP_H */ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index e62cbde81e50..666321de7b99 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -348,7 +348,6 @@ static struct platform_driver shmob_drm_platform_driver = { .probe = shmob_drm_probe, .remove = shmob_drm_remove, .driver = { - .owner = THIS_MODULE, .name = "shmob-drm", .pm = &shmob_drm_pm_ops, }, diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index ae8850f3e63b..d6d6b705b8c1 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -5,6 +5,7 @@ config DRM_STI select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER + select FW_LOADER_USER_HELPER_FALLBACK help Choose this option to enable DRM on STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index 04ac2ceef27f..6ba9d27c1b90 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -3,6 +3,7 @@ sticompositor-y := \ sti_mixer.o \ sti_gdp.o \ sti_vid.o \ + sti_cursor.o \ sti_compositor.o \ sti_drm_crtc.o \ sti_drm_plane.o @@ -18,4 +19,5 @@ obj-$(CONFIG_DRM_STI) = \ sti_hda.o \ sti_tvout.o \ sticompositor.o \ - sti_drm_drv.o
\ No newline at end of file + sti_hqvdp.o \ + sti_drm_drv.o diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 390d93e9a06c..43215d3020fb 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -24,14 +24,16 @@ * stiH407 compositor properties */ struct sti_compositor_data stih407_compositor_data = { - .nb_subdev = 6, + .nb_subdev = 8, .subdev_desc = { + {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000}, {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100}, {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, - {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00} + {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, + {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, }, }; @@ -67,11 +69,11 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo, break; case STI_GPD_SUBDEV: case STI_VID_SUBDEV: + case STI_CURSOR_SUBDEV: compo->layer[layer_id++] = sti_layer_create(compo->dev, desc[i].id, compo->regs + desc[i].offset); break; - /* case STI_CURSOR_SUBDEV : TODO */ default: DRM_ERROR("Unknow subdev compoment type\n"); return 1; @@ -102,33 +104,35 @@ static int sti_compositor_bind(struct device *dev, struct device *master, enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK; enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; - if (compo->mixer[crtc]) + if (crtc < compo->nb_mixers) plane_type = DRM_PLANE_TYPE_PRIMARY; switch (type) { case STI_CUR: cursor = sti_drm_plane_init(drm_dev, compo->layer[i], - (1 << crtc) - 1, - DRM_PLANE_TYPE_CURSOR); + 1, DRM_PLANE_TYPE_CURSOR); break; case STI_GDP: case STI_VID: primary = sti_drm_plane_init(drm_dev, compo->layer[i], - (1 << crtc) - 1, plane_type); + (1 << compo->nb_mixers) - 1, + plane_type); plane++; break; case STI_BCK: + case STI_VDP: break; } /* The first planes are reserved for primary planes*/ - if (compo->mixer[crtc]) { + if (crtc < compo->nb_mixers && primary) { sti_drm_crtc_init(drm_dev, compo->mixer[crtc], primary, cursor); crtc++; cursor = NULL; + primary = NULL; } } } @@ -267,7 +271,6 @@ static int sti_compositor_remove(struct platform_device *pdev) static struct platform_driver sti_compositor_driver = { .driver = { .name = "sti-compositor", - .owner = THIS_MODULE, .of_match_table = compositor_of_match, }, .probe = sti_compositor_probe, diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h index 3ea19db72e0f..019eb44c62cc 100644 --- a/drivers/gpu/drm/sti/sti_compositor.h +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -64,7 +64,6 @@ struct sti_compositor_data { * @layer: array of layers * @nb_mixers: number of mixers for this compositor * @nb_layers: number of layers (GDP,VID,...) for this compositor - * @enable: true if compositor is enable else false * @vtg_vblank_nb: callback for VTG VSYNC notification */ struct sti_compositor { @@ -83,7 +82,6 @@ struct sti_compositor { struct sti_layer *layer[STI_MAX_LAYER]; int nb_mixers; int nb_layers; - bool enable; struct notifier_block vtg_vblank_nb; }; diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c new file mode 100644 index 000000000000..010eaee60bf7 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Vincent Abriou <vincent.abriou@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
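 *
 * [Editorial aside, not part of the patch: a worked example of the
 *  possible_crtcs masks set up in sti_compositor_bind() above, for the
 *  stih407 layout with two mixers (main + aux):
 *
 *	cursor plane:    mask = 1                            -> 0x1
 *	GDP/VID planes:  mask = (1 << compo->nb_mixers) - 1  -> 0x3
 *
 *  so graphics and video planes may be assigned to either CRTC, while
 *  the hardware cursor stays tied to the main mixer.]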
+ * License terms: GNU General Public License (GPL), version 2 + */ +#include <drm/drmP.h> + +#include "sti_cursor.h" +#include "sti_layer.h" +#include "sti_vtg.h" + +/* Registers */ +#define CUR_CTL 0x00 +#define CUR_VPO 0x0C +#define CUR_PML 0x14 +#define CUR_PMP 0x18 +#define CUR_SIZE 0x1C +#define CUR_CML 0x20 +#define CUR_AWS 0x28 +#define CUR_AWE 0x2C + +#define CUR_CTL_CLUT_UPDATE BIT(1) + +#define STI_CURS_MIN_SIZE 1 +#define STI_CURS_MAX_SIZE 128 + +/* + * pixmap dma buffer structure + * + * @paddr: physical address + * @size: buffer size + * @base: virtual address + */ +struct dma_pixmap { + dma_addr_t paddr; + size_t size; + void *base; +}; + +/** + * STI Cursor structure + * + * @layer: layer structure + * @width: cursor width + * @height: cursor height + * @clut: color look up table + * @clut_paddr: color look up table physical address + * @pixmap: pixmap dma buffer (clut8-format cursor) + */ +struct sti_cursor { + struct sti_layer layer; + unsigned int width; + unsigned int height; + unsigned short *clut; + dma_addr_t clut_paddr; + struct dma_pixmap pixmap; +}; + +static const uint32_t cursor_supported_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) + +static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer) +{ + return cursor_supported_formats; +} + +static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer) +{ + return ARRAY_SIZE(cursor_supported_formats); +} + +static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) +{ + struct sti_cursor *cursor = to_sti_cursor(layer); + u32 *src = layer->vaddr; + u8 *dst = cursor->pixmap.base; + unsigned int i, j; + u32 a, r, g, b; + + for (i = 0; i < cursor->height; i++) { + for (j = 0; j < cursor->width; j++) { + /* Pick the 2 higher bits of each component */ + a = (*src >> 30) & 3; + r = (*src >> 22) & 3; + g = (*src >> 14) & 3; + b = (*src >> 6) & 3; + *dst = a << 6 | r << 4 | g << 2 | b; + src++; + dst++; + } + } +} + +static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) +{ + struct sti_cursor *cursor = to_sti_cursor(layer); + struct drm_display_mode *mode = layer->mode; + u32 y, x; + u32 val; + + DRM_DEBUG_DRIVER("\n"); + + dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); + + if (layer->src_w < STI_CURS_MIN_SIZE || + layer->src_h < STI_CURS_MIN_SIZE || + layer->src_w > STI_CURS_MAX_SIZE || + layer->src_h > STI_CURS_MAX_SIZE) { + DRM_ERROR("Invalid cursor size (%dx%d)\n", + layer->src_w, layer->src_h); + return -EINVAL; + } + + /* If the cursor size has changed, re-allocate the pixmap */ + if (!cursor->pixmap.base || + (cursor->width != layer->src_w) || + (cursor->height != layer->src_h)) { + cursor->width = layer->src_w; + cursor->height = layer->src_h; + + if (cursor->pixmap.base) + dma_free_writecombine(layer->dev, + cursor->pixmap.size, + cursor->pixmap.base, + cursor->pixmap.paddr); + + cursor->pixmap.size = cursor->width * cursor->height; + + cursor->pixmap.base = dma_alloc_writecombine(layer->dev, + cursor->pixmap.size, + &cursor->pixmap.paddr, + GFP_KERNEL | GFP_DMA); + if (!cursor->pixmap.base) { + DRM_ERROR("Failed to allocate memory for pixmap\n"); + return -ENOMEM; + } + } + + /* Convert ARGB8888 to CLUT8 */ + sti_cursor_argb8888_to_clut8(layer); + + /* AWS and AWE depend on the mode */ + y = sti_vtg_get_line_number(*mode, 0); + x = sti_vtg_get_pixel_number(*mode, 0); + val = y << 16 | x; + writel(val, layer->regs + CUR_AWS);
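	/*
	 * [Editorial worked example, not part of the patch: tracing one
	 *  pixel through sti_cursor_argb8888_to_clut8() above. Opaque
	 *  red, 0xFFFF0000, yields a = 3, r = 3, g = 0, b = 0, so the
	 *  CLUT8 index is (3 << 6) | (3 << 4) = 0xF0; the matching
	 *  ARGB4444 table entry is built in sti_cursor_init() below.]
	 */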
+ y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); + x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); + val = y << 16 | x; + writel(val, layer->regs + CUR_AWE); + + if (first_prepare) { + /* Set and fetch CLUT */ + writel(cursor->clut_paddr, layer->regs + CUR_CML); + writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); + } + + return 0; +} + +static int sti_cursor_commit_layer(struct sti_layer *layer) +{ + struct sti_cursor *cursor = to_sti_cursor(layer); + struct drm_display_mode *mode = layer->mode; + u32 ydo, xdo; + + dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); + + /* Set memory location, size, and position */ + writel(cursor->pixmap.paddr, layer->regs + CUR_PML); + writel(cursor->width, layer->regs + CUR_PMP); + writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); + + ydo = sti_vtg_get_line_number(*mode, layer->dst_y); + xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x); + writel((ydo << 16) | xdo, layer->regs + CUR_VPO); + + return 0; +} + +static int sti_cursor_disable_layer(struct sti_layer *layer) +{ + return 0; +} + +static void sti_cursor_init(struct sti_layer *layer) +{ + struct sti_cursor *cursor = to_sti_cursor(layer); + unsigned short *base = cursor->clut; + unsigned int a, r, g, b; + + /* Assign CLUT values, ARGB4444 format */ + for (a = 0; a < 4; a++) + for (r = 0; r < 4; r++) + for (g = 0; g < 4; g++) + for (b = 0; b < 4; b++) + *base++ = (a * 5) << 12 | + (r * 5) << 8 | + (g * 5) << 4 | + (b * 5); +} + +static const struct sti_layer_funcs cursor_ops = { + .get_formats = sti_cursor_get_formats, + .get_nb_formats = sti_cursor_get_nb_formats, + .init = sti_cursor_init, + .prepare = sti_cursor_prepare_layer, + .commit = sti_cursor_commit_layer, + .disable = sti_cursor_disable_layer, +}; + +struct sti_layer *sti_cursor_create(struct device *dev) +{ + struct sti_cursor *cursor; + + cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); + if (!cursor) { + DRM_ERROR("Failed to allocate memory for cursor\n"); + return NULL; + } + + /* Allocate clut buffer */ + cursor->clut = dma_alloc_writecombine(dev, + 0x100 * sizeof(unsigned short), + &cursor->clut_paddr, + GFP_KERNEL | GFP_DMA); + + if (!cursor->clut) { + DRM_ERROR("Failed to allocate memory for cursor clut\n"); + devm_kfree(dev, cursor); + return NULL; + } + + cursor->layer.ops = &cursor_ops; + + return (struct sti_layer *)cursor; +} diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h new file mode 100644 index 000000000000..3c9827404f27 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_cursor.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) STMicroelectronics SA 2013 + * Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics. 
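 *
 * [Editorial aside, not part of the patch: completing the worked
 *  example from sti_cursor.c, each 2-bit component (0..3) is scaled by
 *  5 in sti_cursor_init() so it spans the full 4-bit ARGB4444 range
 *  (0, 5, 10, 15); CLUT index 0xF0 (a = 3, r = 3, g = b = 0) therefore
 *  maps to entry 0xFF00, i.e. opaque red.]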
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_CURSOR_H_ +#define _STI_CURSOR_H_ + +struct sti_layer *sti_cursor_create(struct device *dev); + +#endif diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c index 36a1ad3c4823..4c651c200f20 100644 --- a/drivers/gpu/drm/sti/sti_drm_crtc.c +++ b/drivers/gpu/drm/sti/sti_drm_crtc.c @@ -28,7 +28,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc) struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - compo->enable = true; + mixer->enabled = true; /* Prepare and enable the compo IP clock */ if (mixer->id == STI_MIXER_MAIN) { @@ -38,6 +38,8 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc) if (clk_prepare_enable(compo->clk_compo_aux)) DRM_INFO("Failed to prepare/enable compo_aux clk\n"); } + + sti_mixer_clear_all_layers(mixer); } static void sti_drm_crtc_commit(struct drm_crtc *crtc) @@ -62,6 +64,8 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc) /* Enable layer on mixer */ if (sti_mixer_set_layer_status(mixer, layer, true)) DRM_ERROR("Can not enable layer at mixer\n"); + + drm_crtc_vblank_on(crtc); } static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, @@ -144,7 +148,8 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, w = crtc->primary->fb->width - x; h = crtc->primary->fb->height - y; - return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, + return sti_layer_prepare(layer, crtc, + crtc->primary->fb, &crtc->mode, mixer->id, 0, 0, w, h, x, y, w, h); } @@ -171,7 +176,8 @@ static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, w = crtc->primary->fb->width - crtc->x; h = crtc->primary->fb->height - crtc->y; - ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, + ret = sti_layer_prepare(layer, crtc, + crtc->primary->fb, &crtc->mode, mixer->id, 0, 0, w, h, crtc->x, crtc->y, w, h); if (ret) { @@ -196,7 +202,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) struct sti_compositor *compo = dev_get_drvdata(dev); struct sti_layer *layer; - if (!compo->enable) + if (!mixer->enabled) return; DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); @@ -222,7 +228,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) /* Then disable layer itself */ sti_layer_disable(layer); - drm_vblank_off(crtc->dev, mixer->id); + drm_crtc_vblank_off(crtc); /* Disable pixel clock and compo IP clocks */ if (mixer->id == STI_MIXER_MAIN) { @@ -233,7 +239,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) clk_disable_unprepare(compo->clk_compo_aux); } - compo->enable = false; + mixer->enabled = false; } static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { @@ -364,7 +370,6 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) struct sti_drm_private *priv = dev->dev_private; struct sti_compositor *compo = priv->compo; struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; - unsigned long flags; DRM_DEBUG_DRIVER("\n"); @@ -373,13 +378,10 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); /* free the resources of the pending requests */ - spin_lock_irqsave(&dev->event_lock, flags); if (compo->mixer[crtc]->pending_event) { drm_vblank_put(dev, crtc); compo->mixer[crtc]->pending_event = NULL; } - spin_unlock_irqrestore(&dev->event_lock, flags); - } EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); @@ -399,6 +401,7 @@ bool 
sti_drm_crtc_is_main(struct drm_crtc *crtc) return false; } +EXPORT_SYMBOL(sti_drm_crtc_is_main); int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, struct drm_plane *primary, struct drm_plane *cursor) diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c index 223d93c3a05d..5239fa121726 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.c +++ b/drivers/gpu/drm/sti/sti_drm_drv.c @@ -67,8 +67,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags) sti_drm_mode_config_init(dev); ret = component_bind_all(dev->dev, dev); - if (ret) + if (ret) { + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); + kfree(private); return ret; + } drm_helper_disable_unused_functions(dev); @@ -184,7 +188,6 @@ static struct platform_driver sti_drm_master_driver = { .probe = sti_drm_master_probe, .remove = sti_drm_master_remove, .driver = { - .owner = THIS_MODULE, .name = DRIVER_NAME "__master", }, }; @@ -228,7 +231,6 @@ static struct platform_driver sti_drm_platform_driver = { .probe = sti_drm_platform_probe, .remove = sti_drm_platform_remove, .driver = { - .owner = THIS_MODULE, .name = DRIVER_NAME, .of_match_table = sti_drm_dt_ids, }, diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c index f4118d4cac22..bb6a29339e10 100644 --- a/drivers/gpu/drm/sti/sti_drm_plane.c +++ b/drivers/gpu/drm/sti/sti_drm_plane.c @@ -45,7 +45,8 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, } /* src_x are in 16.16 format. */ - res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id, + res = sti_layer_prepare(layer, crtc, fb, + &crtc->mode, mixer->id, crtc_x, crtc_y, crtc_w, crtc_h, src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16); @@ -193,3 +194,4 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev, return &layer->plane; } +EXPORT_SYMBOL(sti_drm_plane_init); diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 4e30b74559f5..32448d1d1e8f 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -73,7 +73,9 @@ struct sti_gdp_node { struct sti_gdp_node_list { struct sti_gdp_node *top_field; + dma_addr_t top_field_paddr; struct sti_gdp_node *btm_field; + dma_addr_t btm_field_paddr; }; /** @@ -81,6 +83,8 @@ struct sti_gdp_node_list { * * @layer: layer structure * @clk_pix: pixel clock for the current gdp + * @clk_main_parent: gdp parent clock if main path used + * @clk_aux_parent: gdp parent clock if aux path used * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification * @is_curr_top: true if the current node processed is the top field * @node_list: array of node list @@ -88,6 +92,8 @@ struct sti_gdp_node_list { struct sti_gdp { struct sti_layer layer; struct clk *clk_pix; + struct clk *clk_main_parent; + struct clk *clk_aux_parent; struct notifier_block vtg_field_nb; bool is_curr_top; struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; @@ -168,7 +174,6 @@ static int sti_gdp_get_alpharange(int format) static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) { int hw_nvn; - void *virt_nvn; struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; @@ -176,11 +181,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) if (!hw_nvn) goto end; - virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); - for (i = 0; i < GDP_NODE_NB_BANK; i++) - if ((virt_nvn != gdp->node_list[i].btm_field) && - (virt_nvn != gdp->node_list[i].top_field)) + if ((hw_nvn != 
gdp->node_list[i].btm_field_paddr) && + (hw_nvn != gdp->node_list[i].top_field_paddr)) return &gdp->node_list[i]; /* in hazardous cases restart with the first node */ @@ -204,7 +207,6 @@ static struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) { int hw_nvn; - void *virt_nvn; struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; @@ -212,11 +214,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) if (!hw_nvn) goto end; - virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); - for (i = 0; i < GDP_NODE_NB_BANK; i++) - if ((virt_nvn == gdp->node_list[i].btm_field) || - (virt_nvn == gdp->node_list[i].top_field)) + if ((hw_nvn == gdp->node_list[i].btm_field_paddr) || + (hw_nvn == gdp->node_list[i].top_field_paddr)) return &gdp->node_list[i]; end: @@ -292,8 +292,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) /* Same content and chained together */ memcpy(btm_field, top_field, sizeof(*btm_field)); - top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field); - btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field); + top_field->gam_gdp_nvn = list->btm_field_paddr; + btm_field->gam_gdp_nvn = list->top_field_paddr; /* Interlaced mode */ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -311,6 +311,17 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) /* Set and enable gdp clock */ if (gdp->clk_pix) { + struct clk *clkp; + /* According to the mixer used, the gdp pixel clock + * should have a different parent clock. */ + if (layer->mixer_id == STI_MIXER_MAIN) + clkp = gdp->clk_main_parent; + else + clkp = gdp->clk_aux_parent; + + if (clkp) + clk_set_parent(gdp->clk_pix, clkp); + res = clk_set_rate(gdp->clk_pix, rate); if (res < 0) { DRM_ERROR("Cannot set rate (%dHz) for gdp\n", @@ -349,8 +360,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer) struct sti_gdp_node *updated_top_node = updated_list->top_field; struct sti_gdp_node *updated_btm_node = updated_list->btm_field; struct sti_gdp *gdp = to_sti_gdp(layer); - u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); - u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); + u32 dma_updated_top = updated_list->top_field_paddr; + u32 dma_updated_btm = updated_list->btm_field_paddr; struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, @@ -461,16 +472,16 @@ static void sti_gdp_init(struct sti_layer *layer) { struct sti_gdp *gdp = to_sti_gdp(layer); struct device_node *np = layer->dev->of_node; - dma_addr_t dma; + dma_addr_t dma_addr; void *base; unsigned int i, size; /* Allocate all the nodes within a single memory page */ size = sizeof(struct sti_gdp_node) * GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; - base = dma_alloc_writecombine(layer->dev, - size, &dma, GFP_KERNEL | GFP_DMA); + base = dma_alloc_writecombine(layer->dev, + size, &dma_addr, GFP_KERNEL | GFP_DMA); + if (!base) { DRM_ERROR("Failed to allocate memory for GDP node\n"); return; } memset(base, 0, size); for (i = 0; i < GDP_NODE_NB_BANK; i++) { - if (virt_to_dma(layer->dev, base) & 0xF) { + if (dma_addr & 
0xF) { DRM_ERROR("Mem alignment failed\n"); return; } gdp->node_list[i].btm_field = base; + gdp->node_list[i].btm_field_paddr = dma_addr; DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); base += sizeof(struct sti_gdp_node); + dma_addr += sizeof(struct sti_gdp_node); } if (of_device_is_compatible(np, "st,stih407-compositor")) { @@ -520,6 +536,14 @@ static void sti_gdp_init(struct sti_layer *layer) gdp->clk_pix = devm_clk_get(layer->dev, clk_name); if (IS_ERR(gdp->clk_pix)) DRM_ERROR("Cannot get %s clock\n", clk_name); + + gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); + if (IS_ERR(gdp->clk_main_parent)) + DRM_ERROR("Cannot get main_parent clock\n"); + + gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); + if (IS_ERR(gdp->clk_aux_parent)) + DRM_ERROR("Cannot get aux_parent clock\n"); } } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index b22968c08d1f..d032e024b0b8 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -130,8 +130,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg) /* Hot plug/unplug IRQ */ if (hdmi->irq_status & HDMI_INT_HOT_PLUG) { - /* read gpio to get the status */ - hdmi->hpd = gpio_get_value(hdmi->hpd_gpio); + hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG; if (hdmi->drm_dev) drm_helper_hpd_irq_event(hdmi->drm_dev); } @@ -273,31 +272,32 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) hdmi_write(hdmi, val, HDMI_SW_DI_CFG); /* Infoframe header */ - val = buffer[0x0]; - val |= buffer[0x1] << 8; - val |= buffer[0x2] << 16; + val = buffer[0]; + val |= buffer[1] << 8; + val |= buffer[2] << 16; hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)); /* Infoframe packet bytes */ - val = frame[0x0]; - val |= frame[0x1] << 8; - val |= frame[0x2] << 16; - val |= frame[0x3] << 24; + val = buffer[3]; + val |= *(frame++) << 8; + val |= *(frame++) << 16; + val |= *(frame++) << 24; hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI)); - val = frame[0x4]; - val |= frame[0x5] << 8; - val |= frame[0x6] << 16; - val |= frame[0x7] << 24; + val = *(frame++); + val |= *(frame++) << 8; + val |= *(frame++) << 16; + val |= *(frame++) << 24; hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI)); - val = frame[0x8]; - val |= frame[0x9] << 8; - val |= frame[0xA] << 16; - val |= frame[0xB] << 24; + val = *(frame++); + val |= *(frame++) << 8; + val |= *(frame++) << 16; + val |= *(frame++) << 24; hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI)); - val = frame[0xC]; + val = *(frame++); + val |= *(frame) << 8; hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI)); /* Enable transmission slot for AVI infoframe @@ -480,17 +480,15 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = { static int sti_hdmi_connector_get_modes(struct drm_connector *connector) { - struct i2c_adapter *i2c_adap; + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + struct sti_hdmi *hdmi = hdmi_connector->hdmi; struct edid *edid; int count; DRM_DEBUG_DRIVER("\n"); - i2c_adap = i2c_get_adapter(1); - if (!i2c_adap) - goto fail; - - edid = drm_get_edid(connector, i2c_adap); + edid = drm_get_edid(connector, hdmi->ddc_adapt); if (!edid) goto fail; @@ -603,29 +601,38 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) struct sti_hdmi_connector *connector; struct drm_connector *drm_connector; struct drm_bridge *bridge; - struct i2c_adapter *i2c_adap; + struct 
device_node *ddc; + int err; + - i2c_adap = i2c_get_adapter(1); - if (!i2c_adap) - return -EPROBE_DEFER; + ddc = of_parse_phandle(dev->of_node, "ddc", 0); + if (ddc) { + hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc); + if (!hdmi->ddc_adapt) { + err = -EPROBE_DEFER; + of_node_put(ddc); + return err; + } + + of_node_put(ddc); + } /* Set the drm device handle */ hdmi->drm_dev = drm_dev; encoder = sti_hdmi_find_encoder(drm_dev); if (!encoder) - return -ENOMEM; + goto err_adapt; connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); if (!connector) - return -ENOMEM; + goto err_adapt; + connector->hdmi = hdmi; bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); if (!bridge) - return -ENOMEM; + goto err_adapt; bridge->driver_private = hdmi; drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs); @@ -662,6 +669,9 @@ err_sysfs: err_connector: drm_bridge_cleanup(bridge); drm_connector_cleanup(drm_connector); +err_adapt: + if (hdmi->ddc_adapt) + put_device(&hdmi->ddc_adapt->dev); return -EINVAL; } @@ -757,13 +766,7 @@ static int sti_hdmi_probe(struct platform_device *pdev) return PTR_ERR(hdmi->clk_audio); } - hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0); - if (hdmi->hpd_gpio < 0) { - DRM_ERROR("Failed to get hdmi hpd-gpio\n"); - return -EIO; - } - - hdmi->hpd = gpio_get_value(hdmi->hpd_gpio); + hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG; init_waitqueue_head(&hdmi->wait_event); @@ -788,6 +791,11 @@ static int sti_hdmi_probe(struct platform_device *pdev) static int sti_hdmi_remove(struct platform_device *pdev) { + struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev); + + if (hdmi->ddc_adapt) + put_device(&hdmi->ddc_adapt->dev); + component_del(&pdev->dev, &sti_hdmi_ops); return 0; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index 61bec6557ceb..3d22390e1f3b 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -14,6 +14,9 @@ #define HDMI_STA 0x0010 #define HDMI_STA_DLL_LCK BIT(5) +#define HDMI_STA_HOT_PLUG_SHIFT 4 +#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT) + struct sti_hdmi; struct hdmi_phy_ops { @@ -37,7 +40,6 @@ struct hdmi_phy_ops { * @irq_status: interrupt status register * @phy_ops: phy start/stop operations * @enabled: true if hdmi is enabled else false - * @hpd_gpio: hdmi hot plug detect gpio number * @hpd: hot plug detect status * @wait_event: wait event * @event_received: wait event status @@ -57,11 +59,11 @@ struct sti_hdmi { u32 irq_status; struct hdmi_phy_ops *phy_ops; bool enabled; - int hpd_gpio; bool hpd; wait_queue_head_t wait_event; bool event_received; struct reset_control *reset; + struct i2c_adapter *ddc_adapt; }; u32 hdmi_read(struct sti_hdmi *hdmi, int offset); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c new file mode 100644 index 000000000000..f3db05dab0ab --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -0,0 +1,1073 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <drm/drmP.h> + +#include "sti_drm_plane.h" +#include "sti_hqvdp.h" +#include "sti_hqvdp_lut.h" +#include "sti_layer.h" +#include "sti_vtg.h" + +/* Firmware name */ +#define HQVDP_FMW_NAME "hqvdp-stih407.bin" + +/* Regs address */ +#define HQVDP_DMEM 0x00000000 /* 0x00000000 */ +#define HQVDP_PMEM 0x00040000 /* 0x00040000 */ +#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */ +#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */ +#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */ +#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */ +#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */ +#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */ +#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */ +#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */ +#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */ +#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */ +#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */ +#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */ +#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */ +#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */ +#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */ +#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */ +#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */ +#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */ +#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */ +#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */ +#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */ +#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */ +#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */ +#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */ +#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */ +#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */ +#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */ +#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */ + +/* Plugs config */ +#define PLUG_CONTROL_ENABLE 0x00000001 +#define PLUG_PAGE_SIZE_256 0x00000002 +#define PLUG_MIN_OPC_8 0x00000003 +#define PLUG_MAX_OPC_64 0x00000006 +#define PLUG_MAX_CHK_2X 0x00000001 +#define PLUG_MAX_MSG_1X 0x00000000 +#define PLUG_MIN_SPACE_1 0x00000000 + +/* SW reset CTRL */ +#define SW_RESET_CTRL_FULL BIT(0) +#define SW_RESET_CTRL_CORE BIT(1) + +/* Startup ctrl 1 */ +#define STARTUP_CTRL1_RST_DONE BIT(0) +#define STARTUP_CTRL1_AUTH_IDLE BIT(2) + +/* Startup ctrl 2 */ +#define STARTUP_CTRL2_FETCH_EN BIT(1) + +/* Info xP70 */ +#define INFO_XP70_FW_READY BIT(15) +#define INFO_XP70_FW_PROCESSING BIT(14) +#define INFO_XP70_FW_INITQUEUES BIT(13) + +/* SOFT_VSYNC */ +#define SOFT_VSYNC_HW 0x00000000 +#define SOFT_VSYNC_SW_CMD 0x00000001 +#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003 + +/* Reset & boot poll config */ +#define POLL_MAX_ATTEMPT 50 +#define POLL_DELAY_MS 20 + +#define SCALE_FACTOR 8192 +#define SCALE_MAX_FOR_LEG_LUT_F 4096 +#define SCALE_MAX_FOR_LEG_LUT_E 4915 +#define SCALE_MAX_FOR_LEG_LUT_D 
6654 +#define SCALE_MAX_FOR_LEG_LUT_C 8192 + +enum sti_hvsrc_orient { + HVSRC_HORI, + HVSRC_VERT +}; + +/* Command structures */ +struct sti_hqvdp_top { + u32 config; + u32 mem_format; + u32 current_luma; + u32 current_enh_luma; + u32 current_right_luma; + u32 current_enh_right_luma; + u32 current_chroma; + u32 current_enh_chroma; + u32 current_right_chroma; + u32 current_enh_right_chroma; + u32 output_luma; + u32 output_chroma; + u32 luma_src_pitch; + u32 luma_enh_src_pitch; + u32 luma_right_src_pitch; + u32 luma_enh_right_src_pitch; + u32 chroma_src_pitch; + u32 chroma_enh_src_pitch; + u32 chroma_right_src_pitch; + u32 chroma_enh_right_src_pitch; + u32 luma_processed_pitch; + u32 chroma_processed_pitch; + u32 input_frame_size; + u32 input_viewport_ori; + u32 input_viewport_ori_right; + u32 input_viewport_size; + u32 left_view_border_width; + u32 right_view_border_width; + u32 left_view_3d_offset_width; + u32 right_view_3d_offset_width; + u32 side_stripe_color; + u32 crc_reset_ctrl; +}; + +/* Configs for interlaced : no IT, no pass thru, 3 fields */ +#define TOP_CONFIG_INTER_BTM 0x00000000 +#define TOP_CONFIG_INTER_TOP 0x00000002 + +/* Config for progressive : no IT, no pass thru, 3 fields */ +#define TOP_CONFIG_PROGRESSIVE 0x00000001 + +/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */ +#define TOP_MEM_FORMAT_DFLT 0x00018060 + +/* Min/Max size */ +#define MAX_WIDTH 0x1FFF +#define MAX_HEIGHT 0x0FFF +#define MIN_WIDTH 0x0030 +#define MIN_HEIGHT 0x0010 + +struct sti_hqvdp_vc1re { + u32 ctrl_prv_csdi; + u32 ctrl_cur_csdi; + u32 ctrl_nxt_csdi; + u32 ctrl_cur_fmd; + u32 ctrl_nxt_fmd; +}; + +struct sti_hqvdp_fmd { + u32 config; + u32 viewport_ori; + u32 viewport_size; + u32 next_next_luma; + u32 next_next_right_luma; + u32 next_next_next_luma; + u32 next_next_next_right_luma; + u32 threshold_scd; + u32 threshold_rfd; + u32 threshold_move; + u32 threshold_cfd; +}; + +struct sti_hqvdp_csdi { + u32 config; + u32 config2; + u32 dcdi_config; + u32 prev_luma; + u32 prev_enh_luma; + u32 prev_right_luma; + u32 prev_enh_right_luma; + u32 next_luma; + u32 next_enh_luma; + u32 next_right_luma; + u32 next_enh_right_luma; + u32 prev_chroma; + u32 prev_enh_chroma; + u32 prev_right_chroma; + u32 prev_enh_right_chroma; + u32 next_chroma; + u32 next_enh_chroma; + u32 next_right_chroma; + u32 next_enh_right_chroma; + u32 prev_motion; + u32 prev_right_motion; + u32 cur_motion; + u32 cur_right_motion; + u32 next_motion; + u32 next_right_motion; +}; + +/* Config for progressive: by pass */ +#define CSDI_CONFIG_PROG 0x00000000 +/* Config for directional deinterlacing without motion */ +#define CSDI_CONFIG_INTER_DIR 0x00000016 +/* Additional configs for fader, blender, motion,... 
deinterlace algorithms */ +#define CSDI_CONFIG2_DFLT 0x000001B3 +#define CSDI_DCDI_CONFIG_DFLT 0x00203803 + +struct sti_hqvdp_hvsrc { + u32 hor_panoramic_ctrl; + u32 output_picture_size; + u32 init_horizontal; + u32 init_vertical; + u32 param_ctrl; + u32 yh_coef[NB_COEF]; + u32 ch_coef[NB_COEF]; + u32 yv_coef[NB_COEF]; + u32 cv_coef[NB_COEF]; + u32 hori_shift; + u32 vert_shift; +}; + +/* Default ParamCtrl: all controls enabled */ +#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF + +struct sti_hqvdp_iqi { + u32 config; + u32 demo_wind_size; + u32 pk_config; + u32 coeff0_coeff1; + u32 coeff2_coeff3; + u32 coeff4; + u32 pk_lut; + u32 pk_gain; + u32 pk_coring_level; + u32 cti_config; + u32 le_config; + u32 le_lut[64]; + u32 con_bri; + u32 sat_gain; + u32 pxf_conf; + u32 default_color; +}; + +/* Default Config : IQI bypassed */ +#define IQI_CONFIG_DFLT 0x00000001 +/* Default Contrast & Brightness gain = 256 */ +#define IQI_CON_BRI_DFLT 0x00000100 +/* Default Saturation gain = 256 */ +#define IQI_SAT_GAIN_DFLT 0x00000100 +/* Default PxfConf : P2I bypassed */ +#define IQI_PXF_CONF_DFLT 0x00000001 + +struct sti_hqvdp_top_status { + u32 processing_time; + u32 input_y_crc; + u32 input_uv_crc; +}; + +struct sti_hqvdp_fmd_status { + u32 fmd_repeat_move_status; + u32 fmd_scene_count_status; + u32 cfd_sum; + u32 field_sum; + u32 next_y_fmd_crc; + u32 next_next_y_fmd_crc; + u32 next_next_next_y_fmd_crc; +}; + +struct sti_hqvdp_csdi_status { + u32 prev_y_csdi_crc; + u32 cur_y_csdi_crc; + u32 next_y_csdi_crc; + u32 prev_uv_csdi_crc; + u32 cur_uv_csdi_crc; + u32 next_uv_csdi_crc; + u32 y_csdi_crc; + u32 uv_csdi_crc; + u32 uv_cup_crc; + u32 mot_csdi_crc; + u32 mot_cur_csdi_crc; + u32 mot_prev_csdi_crc; +}; + +struct sti_hqvdp_hvsrc_status { + u32 y_hvsrc_crc; + u32 u_hvsrc_crc; + u32 v_hvsrc_crc; +}; + +struct sti_hqvdp_iqi_status { + u32 pxf_it_status; + u32 y_iqi_crc; + u32 u_iqi_crc; + u32 v_iqi_crc; +}; + +/* Main commands. 
We use 2 commands: one being processed by the firmware, one + * ready to be fetched upon next Vsync. */ +#define NB_VDP_CMD 2 + +struct sti_hqvdp_cmd { + struct sti_hqvdp_top top; + struct sti_hqvdp_vc1re vc1re; + struct sti_hqvdp_fmd fmd; + struct sti_hqvdp_csdi csdi; + struct sti_hqvdp_hvsrc hvsrc; + struct sti_hqvdp_iqi iqi; + struct sti_hqvdp_top_status top_status; + struct sti_hqvdp_fmd_status fmd_status; + struct sti_hqvdp_csdi_status csdi_status; + struct sti_hqvdp_hvsrc_status hvsrc_status; + struct sti_hqvdp_iqi_status iqi_status; +}; + +/* + * STI HQVDP structure + * + * @dev: driver device + * @drm_dev: the drm device + * @regs: registers + * @layer: layer structure for hqvdp itself + * @vid_plane: VID plug used as link with compositor IP + * @clk: IP clock + * @clk_pix_main: pix main clock + * @reset: reset control + * @vtg_nb: notifier to handle VTG Vsync + * @btm_field_pending: is there any bottom field (interlaced frame) to display + * @curr_field_count: number of field updates + * @last_field_count: number of field updates since last fps measure + * @hqvdp_cmd: buffer of commands + * @hqvdp_cmd_paddr: physical address of hqvdp_cmd + * @vtg: vtg for main data path + */ +struct sti_hqvdp { + struct device *dev; + struct drm_device *drm_dev; + void __iomem *regs; + struct sti_layer layer; + struct drm_plane *vid_plane; + struct clk *clk; + struct clk *clk_pix_main; + struct reset_control *reset; + struct notifier_block vtg_nb; + bool btm_field_pending; + unsigned int curr_field_count; + unsigned int last_field_count; + void *hqvdp_cmd; + dma_addr_t hqvdp_cmd_paddr; + struct sti_vtg *vtg; +}; + +#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) + +static const uint32_t hqvdp_supported_formats[] = { + DRM_FORMAT_NV12, +}; + +static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer) +{ + return hqvdp_supported_formats; +} + +static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer) +{ + return ARRAY_SIZE(hqvdp_supported_formats); +} + +/** + * sti_hqvdp_get_free_cmd + * @hqvdp: hqvdp structure + * + * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW. + * + * RETURNS: + * the offset of the command to be used. + * -1 in error cases + */ +static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp) +{ + int curr_cmd, next_cmd; + dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr; + int i; + + curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD); + next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + for (i = 0; i < NB_VDP_CMD; i++) { + if ((cmd != curr_cmd) && (cmd != next_cmd)) + return i * sizeof(struct sti_hqvdp_cmd); + cmd += sizeof(struct sti_hqvdp_cmd); + } + + return -1; +} + +/** + * sti_hqvdp_get_curr_cmd + * @hqvdp: hqvdp structure + * + * Look for the hqvdp_cmd that is being used by the FW. + * + * RETURNS: + * the offset of the command to be used. + * -1 in error cases + */ +static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp) +{ + int curr_cmd; + dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr; + unsigned int i; + + curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD); + + for (i = 0; i < NB_VDP_CMD; i++) { + if (cmd == curr_cmd) + return i * sizeof(struct sti_hqvdp_cmd); + + cmd += sizeof(struct sti_hqvdp_cmd); + } + + return -1; +} + +/** + * sti_hqvdp_update_hvsrc + * @orient: horizontal or vertical + * @scale: scaling/zoom factor + * @hvsrc: the structure containing the LUT coef + * + * Update the Y and C Lut coef, as well as the shift param + * + * RETURNS: + * None. 
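+ * + * Editor's illustration (assumed numbers, not part of the original + * patch): scale is expressed against SCALE_FACTOR, so 8192 means 1:1. + * A hypothetical 2x downscale gives scale = 8192 * dst / src = 4096, + * which is no longer below SCALE_MAX_FOR_LEG_LUT_F (4096) but is below + * SCALE_MAX_FOR_LEG_LUT_E (4915), so the LUT_E legacy tables are used; + * scale == 8192 selects coef_lut_b (pass-through taps), and any + * upscale (scale > 8192) falls back to coef_lut_a_legacy.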
+ */ +static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale, + struct sti_hqvdp_hvsrc *hvsrc) +{ + const int *coef_c, *coef_y; + int shift_c, shift_y; + + /* Get the appropriate coef tables */ + if (scale < SCALE_MAX_FOR_LEG_LUT_F) { + coef_y = coef_lut_f_y_legacy; + coef_c = coef_lut_f_c_legacy; + shift_y = SHIFT_LUT_F_Y_LEGACY; + shift_c = SHIFT_LUT_F_C_LEGACY; + } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) { + coef_y = coef_lut_e_y_legacy; + coef_c = coef_lut_e_c_legacy; + shift_y = SHIFT_LUT_E_Y_LEGACY; + shift_c = SHIFT_LUT_E_C_LEGACY; + } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) { + coef_y = coef_lut_d_y_legacy; + coef_c = coef_lut_d_c_legacy; + shift_y = SHIFT_LUT_D_Y_LEGACY; + shift_c = SHIFT_LUT_D_C_LEGACY; + } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) { + coef_y = coef_lut_c_y_legacy; + coef_c = coef_lut_c_c_legacy; + shift_y = SHIFT_LUT_C_Y_LEGACY; + shift_c = SHIFT_LUT_C_C_LEGACY; + } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) { + coef_y = coef_c = coef_lut_b; + shift_y = shift_c = SHIFT_LUT_B; + } else { + coef_y = coef_c = coef_lut_a_legacy; + shift_y = shift_c = SHIFT_LUT_A_LEGACY; + } + + if (orient == HVSRC_HORI) { + hvsrc->hori_shift = (shift_c << 16) | shift_y; + memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef)); + memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef)); + } else { + hvsrc->vert_shift = (shift_c << 16) | shift_y; + memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef)); + memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef)); + } +} + +/** + * sti_hqvdp_check_hw_scaling + * @layer: hqvdp layer + * + * Check if the HW is able to perform the scaling request + * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: + * Zy = OutputHeight / InputHeight + * LFW = (Tx * IPClock) / (MaxNbCycles * Cp) + * Tx : Total video mode horizontal resolution + * IPClock : HQVDP IP clock (Mhz) + * MaxNbCycles: max(InputWidth, OutputWidth) + * Cp: Video mode pixel clock (Mhz) + * + * RETURNS: + * True if the HW can scale. + */ +static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) +{ + struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); + unsigned long lfw; + unsigned int inv_zy; + + lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); + lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; + + inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); + + return (inv_zy <= lfw) ? true : false; +} + +/** + * sti_hqvdp_prepare_layer + * @layer: hqvdp layer + * @first_prepare: true if it is the first time this function is called + * + * Prepares a command for the firmware + * + * RETURNS: + * 0 on success. 
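+ * + * Worked example for the sti_hqvdp_check_hw_scaling() call below, with + * hypothetical numbers that are not taken from the patch: a 1080p mode + * (htotal = 2200, pixel clock = 148500 kHz), a 400 MHz HQVDP clock and + * max(src_w, dst_w) = 1920 give lfw = (2200 * 400) / (1920 * 148.5) = 3 + * in integer arithmetic, so vertical downscales up to + * inv_zy = DIV_ROUND_UP(src_h, dst_h) = 3 are accepted and steeper + * ones are rejected with -EINVAL.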
+ */ +static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) +{ + struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); + struct sti_hqvdp_cmd *cmd; + int scale_h, scale_v; + int cmd_offset; + + dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); + + /* prepare and commit VID plane */ + hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane, + layer->crtc, layer->fb, + layer->dst_x, layer->dst_y, + layer->dst_w, layer->dst_h, + layer->src_x, layer->src_y, + layer->src_w, layer->src_h); + + cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); + if (cmd_offset == -1) { + DRM_ERROR("No available hqvdp_cmd now\n"); + return -EBUSY; + } + cmd = hqvdp->hqvdp_cmd + cmd_offset; + + if (!sti_hqvdp_check_hw_scaling(layer)) { + DRM_ERROR("Scaling beyond HW capabilities\n"); + return -EINVAL; + } + + /* Static parameters, defaulting to progressive mode */ + cmd->top.config = TOP_CONFIG_PROGRESSIVE; + cmd->top.mem_format = TOP_MEM_FORMAT_DFLT; + cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT; + cmd->csdi.config = CSDI_CONFIG_PROG; + + /* VC1RE, FMD bypassed : keep everything set to 0 + * IQI/P2I bypassed */ + cmd->iqi.config = IQI_CONFIG_DFLT; + cmd->iqi.con_bri = IQI_CON_BRI_DFLT; + cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT; + cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; + + /* Buffer planes address */ + cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0]; + cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1]; + + /* Pitches */ + cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch = + layer->pitches[0]; + cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch = + layer->pitches[1]; + + /* Input / output size + * Align to upper even value */ + layer->dst_w = ALIGN(layer->dst_w, 2); + layer->dst_h = ALIGN(layer->dst_h, 2); + + if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) || + (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) || + (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) || + (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) { + DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", + layer->src_w, layer->src_h, + layer->dst_w, layer->dst_h); + return -EINVAL; + } + cmd->top.input_viewport_size = cmd->top.input_frame_size = + layer->src_h << 16 | layer->src_w; + cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w; + cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x; + + /* Handle interlaced */ + if (layer->fb->flags & DRM_MODE_FB_INTERLACED) { + /* Top field to display */ + cmd->top.config = TOP_CONFIG_INTER_TOP; + + /* Update pitches and vert size */ + cmd->top.input_frame_size = (layer->src_h / 2) << 16 | + layer->src_w; + cmd->top.luma_processed_pitch *= 2; + cmd->top.luma_src_pitch *= 2; + cmd->top.chroma_processed_pitch *= 2; + cmd->top.chroma_src_pitch *= 2; + + /* Enable directional deinterlacing processing */ + cmd->csdi.config = CSDI_CONFIG_INTER_DIR; + cmd->csdi.config2 = CSDI_CONFIG2_DFLT; + cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT; + } + + /* Update hvsrc lut coef */ + scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w; + sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); + + scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h; + sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); + + if (first_prepare) { + /* Prevent VTG shutdown */ + if (clk_prepare_enable(hqvdp->clk_pix_main)) { + DRM_ERROR("Failed to prepare/enable pix main clk\n"); + return -ENXIO; + } + + /* Register VTG Vsync callback to handle bottom fields */ + if ((layer->fb->flags & 
DRM_MODE_FB_INTERLACED) && + sti_vtg_register_client(hqvdp->vtg, + &hqvdp->vtg_nb, layer->mixer_id)) { + DRM_ERROR("Cannot register VTG notifier\n"); + return -ENXIO; + } + } + + return 0; +} + +static int sti_hqvdp_commit_layer(struct sti_layer *layer) +{ + struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); + int cmd_offset; + + dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); + + cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); + if (cmd_offset == -1) { + DRM_ERROR("No available hqvdp_cmd now\n"); + return -EBUSY; + } + + writel(hqvdp->hqvdp_cmd_paddr + cmd_offset, + hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + hqvdp->curr_field_count++; + + /* Interlaced : get ready to display the bottom field at next Vsync */ + if (layer->fb->flags & DRM_MODE_FB_INTERLACED) + hqvdp->btm_field_pending = true; + + dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", + __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset); + + return 0; +} + +static int sti_hqvdp_disable_layer(struct sti_layer *layer) +{ + struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); + int i; + + DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); + + /* Unregister VTG Vsync callback */ + if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && + sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) + DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); + + /* Set next cmd to NULL */ + writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + for (i = 0; i < POLL_MAX_ATTEMPT; i++) { + if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70) + & INFO_XP70_FW_READY) + break; + msleep(POLL_DELAY_MS); + } + + /* VTG can stop now */ + clk_disable_unprepare(hqvdp->clk_pix_main); + + if (i == POLL_MAX_ATTEMPT) { + DRM_ERROR("XP70 could not revert to idle\n"); + return -ENXIO; + } + + /* disable VID plane */ + hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane); + + return 0; +} + +/** + * sti_hqvdp_vtg_cb + * @nb: notifier block + * @evt: event message + * @data: private data + * + * Handle VTG Vsync event, display pending bottom field + * + * RETURNS: + * 0 on success. 
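+ * + * Editor's note on the mechanism (not part of the original patch): the + * bottom-field command is cloned from the current top-field command; + * since sti_hqvdp_prepare_layer() doubled the source pitches for + * interlaced buffers, adding luma_src_pitch / 2 (one line of the + * original frame) to the plane addresses makes the clone fetch from + * the second source line, where the bottom field starts.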
+ */ +int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data) +{ + struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb); + int btm_cmd_offset, top_cmd_offset; + struct sti_hqvdp_cmd *btm_cmd, *top_cmd; + + if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) { + DRM_DEBUG_DRIVER("Unknown event\n"); + return 0; + } + + if (hqvdp->btm_field_pending) { + /* Create the btm field command from the current one */ + btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); + top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp); + if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) { + DRM_ERROR("Cannot get cmds, skip btm field\n"); + return -EBUSY; + } + + btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset; + top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset; + + memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd)); + + btm_cmd->top.config = TOP_CONFIG_INTER_BTM; + btm_cmd->top.current_luma += + btm_cmd->top.luma_src_pitch / 2; + btm_cmd->top.current_chroma += + btm_cmd->top.chroma_src_pitch / 2; + + /* Post the command to mailbox */ + writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset, + hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + hqvdp->curr_field_count++; + hqvdp->btm_field_pending = false; + + dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", + __func__, hqvdp->hqvdp_cmd_paddr); + } + + return 0; +} + +static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id) +{ + struct drm_plane *plane; + + list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + struct sti_layer *layer = to_sti_layer(plane); + + if (layer->desc == id) + return plane; + } + + return NULL; +} + +static void sti_hqvdp_init(struct sti_layer *layer) +{ + struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); + int size; + + /* find the plane matching with vid 0 */ + hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0); + if (!hqvdp->vid_plane) { + DRM_ERROR("Cannot find Main video layer\n"); + return; + } + + hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; + + /* Allocate memory for the VDP commands */ + size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd); + hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size, + &hqvdp->hqvdp_cmd_paddr, + GFP_KERNEL | GFP_DMA); + if (!hqvdp->hqvdp_cmd) { + DRM_ERROR("Failed to allocate memory for VDP cmd\n"); + return; + } + + memset(hqvdp->hqvdp_cmd, 0, size); +} + +static const struct sti_layer_funcs hqvdp_ops = { + .get_formats = sti_hqvdp_get_formats, + .get_nb_formats = sti_hqvdp_get_nb_formats, + .init = sti_hqvdp_init, + .prepare = sti_hqvdp_prepare_layer, + .commit = sti_hqvdp_commit_layer, + .disable = sti_hqvdp_disable_layer, +}; + +struct sti_layer *sti_hqvdp_create(struct device *dev) +{ + struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); + + hqvdp->layer.ops = &hqvdp_ops; + + return &hqvdp->layer; +} +EXPORT_SYMBOL(sti_hqvdp_create); + +static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) +{ + /* Configure Plugs (same for RD & WR) */ + writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE); + writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC); + writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC); + writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK); + writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG); + writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE); + writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL); + + writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE); + writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC); + 
writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC); + writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK); + writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG); + writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE); + writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL); +} + +/** + * sti_hqvdp_start_xp70 + * @firmware: firmware found + * @ctxt: hqvdp structure + * + * Run the xP70 initialization sequence + */ +static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt) +{ + struct sti_hqvdp *hqvdp = ctxt; + u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem; + u8 *data; + int i; + struct fw_header { + int rd_size; + int wr_size; + int pmem_size; + int dmem_size; + } *header; + + DRM_DEBUG_DRIVER("\n"); + /* Check firmware parts */ + if (!firmware) { + DRM_ERROR("Firmware not available\n"); + return; + } + + header = (struct fw_header *) firmware->data; + if (firmware->size < sizeof(*header)) { + DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size); + goto out; + } + if ((sizeof(*header) + header->rd_size + header->wr_size + + header->pmem_size + header->dmem_size) != firmware->size) { + DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n", + sizeof(*header), header->rd_size, header->wr_size, + header->pmem_size, header->dmem_size, + firmware->size); + goto out; + } + + data = (u8 *) firmware->data; + data += sizeof(*header); + fw_rd_plug = (void *) data; + data += header->rd_size; + fw_wr_plug = (void *) data; + data += header->wr_size; + fw_pmem = (void *) data; + data += header->pmem_size; + fw_dmem = (void *) data; + + /* Enable clock */ + if (clk_prepare_enable(hqvdp->clk)) + DRM_ERROR("Failed to prepare/enable HQVDP clk\n"); + + /* Reset */ + writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL); + + for (i = 0; i < POLL_MAX_ATTEMPT; i++) { + if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1) + & STARTUP_CTRL1_RST_DONE) + break; + msleep(POLL_DELAY_MS); + } + if (i == POLL_MAX_ATTEMPT) { + DRM_ERROR("Could not reset\n"); + goto out; + } + + /* Init Read & Write plugs */ + for (i = 0; i < header->rd_size / 4; i++) + writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4); + for (i = 0; i < header->wr_size / 4; i++) + writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4); + + sti_hqvdp_init_plugs(hqvdp); + + /* Authorize Idle Mode */ + writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1); + + /* Prevent VTG interruption during the boot */ + writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); + writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD); + + /* Download PMEM & DMEM */ + for (i = 0; i < header->pmem_size / 4; i++) + writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4); + for (i = 0; i < header->dmem_size / 4; i++) + writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4); + + /* Enable fetch */ + writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2); + + /* Wait end of boot */ + for (i = 0; i < POLL_MAX_ATTEMPT; i++) { + if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70) + & INFO_XP70_FW_READY) + break; + msleep(POLL_DELAY_MS); + } + if (i == POLL_MAX_ATTEMPT) { + DRM_ERROR("Could not boot\n"); + goto out; + } + + /* Launch Vsync */ + writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); + + DRM_INFO("HQVDP XP70 started\n"); +out: + release_firmware(firmware); +} + +int sti_hqvdp_bind(struct device *dev, struct device *master, void *data) +{ + struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct sti_layer 
*layer; + int err; + + DRM_DEBUG_DRIVER("\n"); + + hqvdp->drm_dev = drm_dev; + + /* Request for firmware */ + err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + HQVDP_FMW_NAME, hqvdp->dev, + GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70); + if (err) { + DRM_ERROR("Can't get HQVDP firmware\n"); + return err; + } + + layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); + if (!layer) { + DRM_ERROR("Can't create HQVDP plane\n"); + return -ENOMEM; + } + + sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY); + + return 0; +} + +static void sti_hqvdp_unbind(struct device *dev, + struct device *master, void *data) +{ + /* do nothing */ +} + +static const struct component_ops sti_hqvdp_ops = { + .bind = sti_hqvdp_bind, + .unbind = sti_hqvdp_unbind, +}; + +static int sti_hqvdp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *vtg_np; + struct sti_hqvdp *hqvdp; + struct resource *res; + + DRM_DEBUG_DRIVER("\n"); + + hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL); + if (!hqvdp) { + DRM_ERROR("Failed to allocate HQVDP context\n"); + return -ENOMEM; + } + + hqvdp->dev = dev; + + /* Get Memory resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + DRM_ERROR("Get memory resource failed\n"); + return -ENXIO; + } + hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (hqvdp->regs == NULL) { + DRM_ERROR("Register mapping failed\n"); + return -ENXIO; + } + + /* Get clock resources */ + hqvdp->clk = devm_clk_get(dev, "hqvdp"); + hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main"); + if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) { + DRM_ERROR("Cannot get clocks\n"); + return -ENXIO; + } + + /* Get reset resources */ + hqvdp->reset = devm_reset_control_get(dev, "hqvdp"); + if (!IS_ERR(hqvdp->reset)) + reset_control_deassert(hqvdp->reset); + + vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); + if (vtg_np) + hqvdp->vtg = of_vtg_find(vtg_np); + + platform_set_drvdata(pdev, hqvdp); + + return component_add(&pdev->dev, &sti_hqvdp_ops); +} + +static int sti_hqvdp_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &sti_hqvdp_ops); + return 0; +} + +static struct of_device_id hqvdp_of_match[] = { + { .compatible = "st,stih407-hqvdp", }, + { /* end node */ } +}; +MODULE_DEVICE_TABLE(of, hqvdp_of_match); + +struct platform_driver sti_hqvdp_driver = { + .driver = { + .name = "sti-hqvdp", + .owner = THIS_MODULE, + .of_match_table = hqvdp_of_match, + }, + .probe = sti_hqvdp_probe, + .remove = sti_hqvdp_remove, +}; + +module_platform_driver(sti_hqvdp_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h new file mode 100644 index 000000000000..cd5ecd0a6dea --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hqvdp.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_HQVDP_H_ +#define _STI_HQVDP_H_ + +struct sti_layer *sti_hqvdp_create(struct device *dev); + +#endif diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h new file mode 100644 index 000000000000..619af7f4384e --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h @@ -0,0 +1,373 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_HQVDP_LUT_H_ +#define _STI_HQVDP_LUT_H_ + +#define NB_COEF 128 + +#define SHIFT_LUT_A_LEGACY 8 +#define SHIFT_LUT_B 8 +#define SHIFT_LUT_C_Y_LEGACY 8 +#define SHIFT_LUT_C_C_LEGACY 8 +#define SHIFT_LUT_D_Y_LEGACY 8 +#define SHIFT_LUT_D_C_LEGACY 8 +#define SHIFT_LUT_E_Y_LEGACY 8 +#define SHIFT_LUT_E_C_LEGACY 8 +#define SHIFT_LUT_F_Y_LEGACY 8 +#define SHIFT_LUT_F_C_LEGACY 8 + +static const u32 coef_lut_a_legacy[NB_COEF] = { + 0x0000ffff, 0x00010000, 0x000100ff, 0x00000000, + 0x00000000, 0x00050000, 0xfffc00ff, 0x00000000, + 0x00000000, 0x00090000, 0xfff900fe, 0x00000000, + 0x00000000, 0x0010ffff, 0xfff600fb, 0x00000000, + 0x00000000, 0x0017fffe, 0xfff400f7, 0x00000000, + 0x00000000, 0x001ffffd, 0xfff200f2, 0x00000000, + 0x00000000, 0x0027fffc, 0xfff100ec, 0x00000000, + 0x00000000, 0x0030fffb, 0xfff000e5, 0x00000000, + 0x00000000, 0x003afffa, 0xffee00de, 0x00000000, + 0x00000000, 0x0044fff9, 0xffed00d6, 0x00000000, + 0x00000000, 0x004efff8, 0xffed00cd, 0x00000000, + 0x00000000, 0x0059fff6, 0xffed00c4, 0x00000000, + 0x00000000, 0x0064fff5, 0xffed00ba, 0x00000000, + 0x00000000, 0x006ffff3, 0xffee00b0, 0x00000000, + 0x00000000, 0x007afff2, 0xffee00a6, 0x00000000, + 0x00000000, 0x0085fff1, 0xffef009b, 0x00000000, + 0x00000000, 0x0090fff0, 0xfff00090, 0x00000000, + 0x00000000, 0x009bffef, 0xfff10085, 0x00000000, + 0x00000000, 0x00a6ffee, 0xfff2007a, 0x00000000, + 0x00000000, 0x00b0ffee, 0xfff3006f, 0x00000000, + 0x00000000, 0x00baffed, 0xfff50064, 0x00000000, + 0x00000000, 0x00c4ffed, 0xfff60059, 0x00000000, + 0x00000000, 0x00cdffed, 0xfff8004e, 0x00000000, + 0x00000000, 0x00d6ffed, 0xfff90044, 0x00000000, + 0x00000000, 0x00deffee, 0xfffa003a, 0x00000000, + 0x00000000, 0x00e5fff0, 0xfffb0030, 0x00000000, + 0x00000000, 0x00ecfff1, 0xfffc0027, 0x00000000, + 0x00000000, 0x00f2fff2, 0xfffd001f, 0x00000000, + 0x00000000, 0x00f7fff4, 0xfffe0017, 0x00000000, + 0x00000000, 0x00fbfff6, 0xffff0010, 0x00000000, + 0x00000000, 0x00fefff9, 0x00000009, 0x00000000, + 0x00000000, 0x00fffffc, 0x00000005, 0x00000000 +}; + +static const u32 coef_lut_b[NB_COEF] = { + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 
0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000100, 0x00000000 +}; + +static const u32 coef_lut_c_y_legacy[NB_COEF] = { + 0x00060004, 0x0038ffe1, 0x003800be, 0x0006ffe1, + 0x00050005, 0x0042ffe1, 0x003800b3, 0x0007ffe1, + 0x00040006, 0x0046ffe1, 0x003300b2, 0x0008ffe2, + 0x00030007, 0x004cffe1, 0x002e00b1, 0x0008ffe2, + 0x00020006, 0x0051ffe2, 0x002900b0, 0x0009ffe3, + 0x00010008, 0x0056ffe2, 0x002400ae, 0x0009ffe4, + 0xffff0008, 0x005cffe3, 0x001f00ad, 0x000affe4, + 0xfffe0008, 0x0062ffe4, 0x001a00ab, 0x000affe5, + 0xfffd000a, 0x0066ffe5, 0x001500a8, 0x000bffe6, + 0xfffc0009, 0x006bffe7, 0x001100a5, 0x000bffe8, + 0xfffa000a, 0x0070ffe8, 0x000d00a3, 0x000bffe9, + 0xfff9000b, 0x0076ffea, 0x0008009f, 0x000bffea, + 0xfff7000b, 0x007affec, 0x0005009b, 0x000cffec, + 0xfff6000b, 0x007effef, 0x00010098, 0x000cffed, + 0xfff4000b, 0x0084fff1, 0xfffd0095, 0x000cffee, + 0xfff3000b, 0x0088fff4, 0xfffa0090, 0x000cfff0, + 0xfff1000b, 0x008dfff7, 0xfff7008d, 0x000bfff1, + 0xfff0000c, 0x0090fffa, 0xfff40088, 0x000bfff3, + 0xffee000c, 0x0095fffd, 0xfff10084, 0x000bfff4, + 0xffed000c, 0x00980001, 0xffef007e, 0x000bfff6, + 0xffec000c, 0x009b0005, 0xffec007a, 0x000bfff7, + 0xffea000b, 0x009f0008, 0xffea0076, 0x000bfff9, + 0xffe9000b, 0x00a3000d, 0xffe80070, 0x000afffa, + 0xffe8000b, 0x00a50011, 0xffe7006b, 0x0009fffc, + 0xffe6000b, 0x00a80015, 0xffe50066, 0x000afffd, + 0xffe5000a, 0x00ab001a, 0xffe40062, 0x0008fffe, + 0xffe4000a, 0x00ad001f, 0xffe3005c, 0x0008ffff, + 0xffe40009, 0x00ae0024, 0xffe20056, 0x00080001, + 0xffe30009, 0x00b00029, 0xffe20051, 0x00060002, + 0xffe20008, 0x00b1002e, 0xffe1004c, 0x00070003, + 0xffe20008, 0x00b20033, 0xffe10046, 0x00060004, + 0xffe10007, 0x00b30038, 0xffe10042, 0x00050005 +}; + +static const u32 coef_lut_c_c_legacy[NB_COEF] = { + 0x0001fff3, 0x003afffb, 0x003a00a1, 0x0001fffb, + 0x0001fff5, 0x0041fffb, 0x0038009a, 0x0001fffb, + 0x0001fff5, 0x0046fffb, 0x00340099, 0x0001fffb, + 0x0001fff7, 0x0049fffb, 0x00300098, 0x0001fffb, + 0x0001fff9, 0x004cfffb, 0x002d0096, 0x0001fffb, + 0x0001fffa, 0x004ffffc, 0x00290095, 0x0001fffb, + 0x0001fff9, 0x0054fffd, 0x00250093, 0x0001fffc, + 0x0001fffa, 0x0058fffd, 0x00220092, 0x0000fffc, + 0x0001fffb, 0x005bfffe, 0x001f0090, 0x0000fffc, + 0x0001fffd, 0x005effff, 0x001c008c, 0x0000fffd, + 0x0001fffd, 0x00620000, 0x0019008a, 0x0000fffd, + 0x0001fffe, 0x00660001, 0x00160088, 0xfffffffd, + 0x0000fffe, 0x006a0003, 0x00130085, 0xfffffffe, + 0x0000fffe, 0x006e0004, 0x00100083, 0xfffffffe, + 0x0000fffe, 0x00710006, 0x000e007f, 0xffffffff, + 0x0000fffe, 0x00750008, 0x000c007c, 0xfffeffff, + 0xfffffffe, 0x0079000a, 0x000a0079, 0xfffeffff, + 0xfffffffe, 0x007c000c, 0x00080075, 0xfffe0000, + 0xffffffff, 0x007f000e, 0x00060071, 0xfffe0000, + 0xfffeffff, 0x00830010, 0x0004006e, 0xfffe0000, + 0xfffeffff, 0x00850013, 0x0003006a, 0xfffe0000, + 0xfffdffff, 0x00880016, 0x00010066, 0xfffe0001, + 
0xfffd0000, 0x008a0019, 0x00000062, 0xfffd0001, + 0xfffd0000, 0x008c001c, 0xffff005e, 0xfffd0001, + 0xfffc0000, 0x0090001f, 0xfffe005b, 0xfffb0001, + 0xfffc0000, 0x00920022, 0xfffd0058, 0xfffa0001, + 0xfffc0001, 0x00930025, 0xfffd0054, 0xfff90001, + 0xfffb0001, 0x00950029, 0xfffc004f, 0xfffa0001, + 0xfffb0001, 0x0096002d, 0xfffb004c, 0xfff90001, + 0xfffb0001, 0x00980030, 0xfffb0049, 0xfff70001, + 0xfffb0001, 0x00990034, 0xfffb0046, 0xfff50001, + 0xfffb0001, 0x009a0038, 0xfffb0041, 0xfff50001 +}; + +static const u32 coef_lut_d_y_legacy[NB_COEF] = { + 0xfff80009, 0x0046ffec, 0x004600a3, 0xfff8ffec, + 0xfff70009, 0x004effed, 0x0044009d, 0xfff9ffeb, + 0xfff6000a, 0x0052ffee, 0x003f009d, 0xfffaffea, + 0xfff50009, 0x0057ffef, 0x003b009d, 0xfffbffe9, + 0xfff50008, 0x005bfff0, 0x0037009c, 0xfffcffe9, + 0xfff40008, 0x005ffff2, 0x0033009b, 0xfffcffe9, + 0xfff30007, 0x0064fff3, 0x002f009b, 0xfffdffe8, + 0xfff20007, 0x0068fff5, 0x002b0099, 0xfffeffe8, + 0xfff10008, 0x006bfff7, 0x00270097, 0xffffffe8, + 0xfff00007, 0x006ffff9, 0x00230097, 0xffffffe8, + 0xffef0006, 0x0073fffb, 0x00200095, 0x0000ffe8, + 0xffee0005, 0x0077fffe, 0x001c0093, 0x0000ffe9, + 0xffee0005, 0x007a0000, 0x00180091, 0x0001ffe9, + 0xffed0005, 0x007d0003, 0x0015008e, 0x0002ffe9, + 0xffec0005, 0x00800006, 0x0012008b, 0x0002ffea, + 0xffeb0004, 0x00840008, 0x000e008a, 0x0003ffea, + 0xffeb0003, 0x0087000b, 0x000b0087, 0x0003ffeb, + 0xffea0003, 0x008a000e, 0x00080084, 0x0004ffeb, + 0xffea0002, 0x008b0012, 0x00060080, 0x0005ffec, + 0xffe90002, 0x008e0015, 0x0003007d, 0x0005ffed, + 0xffe90001, 0x00910018, 0x0000007a, 0x0005ffee, + 0xffe90000, 0x0093001c, 0xfffe0077, 0x0005ffee, + 0xffe80000, 0x00950020, 0xfffb0073, 0x0006ffef, + 0xffe8ffff, 0x00970023, 0xfff9006f, 0x0007fff0, + 0xffe8ffff, 0x00970027, 0xfff7006b, 0x0008fff1, + 0xffe8fffe, 0x0099002b, 0xfff50068, 0x0007fff2, + 0xffe8fffd, 0x009b002f, 0xfff30064, 0x0007fff3, + 0xffe9fffc, 0x009b0033, 0xfff2005f, 0x0008fff4, + 0xffe9fffc, 0x009c0037, 0xfff0005b, 0x0008fff5, + 0xffe9fffb, 0x009d003b, 0xffef0057, 0x0009fff5, + 0xffeafffa, 0x009d003f, 0xffee0052, 0x000afff6, + 0xffebfff9, 0x009d0044, 0xffed004e, 0x0009fff7 +}; + +static const u32 coef_lut_d_c_legacy[NB_COEF] = { + 0xfffeffff, 0x003fffff, 0x003f0089, 0xfffeffff, + 0xfffe0000, 0x00460000, 0x0042007d, 0xfffffffe, + 0xfffe0000, 0x00490001, 0x003f007d, 0xfffffffd, + 0xfffd0001, 0x004b0002, 0x003c007d, 0x0000fffc, + 0xfffd0001, 0x004e0003, 0x0039007c, 0x0000fffc, + 0xfffc0001, 0x00510005, 0x0036007c, 0x0000fffb, + 0xfffc0001, 0x00540006, 0x0033007b, 0x0001fffa, + 0xfffc0003, 0x00550008, 0x00310078, 0x0001fffa, + 0xfffb0003, 0x00580009, 0x002e0078, 0x0001fffa, + 0xfffb0002, 0x005b000b, 0x002b0077, 0x0002fff9, + 0xfffa0003, 0x005e000d, 0x00280075, 0x0002fff9, + 0xfffa0002, 0x0060000f, 0x00260074, 0x0002fff9, + 0xfffa0004, 0x00610011, 0x00230072, 0x0002fff9, + 0xfffa0004, 0x00640013, 0x00200070, 0x0002fff9, + 0xfff90004, 0x00660015, 0x001e006e, 0x0003fff9, + 0xfff90004, 0x00680017, 0x001c006c, 0x0003fff9, + 0xfff90003, 0x006b0019, 0x0019006b, 0x0003fff9, + 0xfff90003, 0x006c001c, 0x00170068, 0x0004fff9, + 0xfff90003, 0x006e001e, 0x00150066, 0x0004fff9, + 0xfff90002, 0x00700020, 0x00130064, 0x0004fffa, + 0xfff90002, 0x00720023, 0x00110061, 0x0004fffa, + 0xfff90002, 0x00740026, 0x000f0060, 0x0002fffa, + 0xfff90002, 0x00750028, 0x000d005e, 0x0003fffa, + 0xfff90002, 0x0077002b, 0x000b005b, 0x0002fffb, + 0xfffa0001, 0x0078002e, 0x00090058, 0x0003fffb, + 0xfffa0001, 0x00780031, 0x00080055, 0x0003fffc, + 0xfffa0001, 0x007b0033, 0x00060054, 
0x0001fffc, + 0xfffb0000, 0x007c0036, 0x00050051, 0x0001fffc, + 0xfffc0000, 0x007c0039, 0x0003004e, 0x0001fffd, + 0xfffc0000, 0x007d003c, 0x0002004b, 0x0001fffd, + 0xfffdffff, 0x007d003f, 0x00010049, 0x0000fffe, + 0xfffeffff, 0x007d0042, 0x00000046, 0x0000fffe +}; + +static const u32 coef_lut_e_y_legacy[NB_COEF] = { + 0xfff10001, 0x00490004, 0x00490083, 0xfff10004, + 0xfff10000, 0x00500006, 0x004b007b, 0xfff10002, + 0xfff10000, 0x00530007, 0x0048007b, 0xfff10001, + 0xfff10000, 0x00550009, 0x0046007a, 0xfff10000, + 0xfff1fffe, 0x0058000b, 0x0043007b, 0xfff2fffe, + 0xfff1ffff, 0x005a000d, 0x0040007a, 0xfff2fffd, + 0xfff1fffd, 0x005d000f, 0x003e007a, 0xfff2fffc, + 0xfff1fffd, 0x005f0011, 0x003b0079, 0xfff3fffb, + 0xfff1fffc, 0x00610013, 0x00390079, 0xfff3fffa, + 0xfff1fffb, 0x00640015, 0x00360079, 0xfff3fff9, + 0xfff1fffa, 0x00660017, 0x00340078, 0xfff4fff8, + 0xfff1fffb, 0x00680019, 0x00310077, 0xfff4fff7, + 0xfff2fff9, 0x006a001b, 0x002f0076, 0xfff5fff6, + 0xfff2fff9, 0x006c001e, 0x002c0075, 0xfff5fff5, + 0xfff2fff9, 0x006d0020, 0x002a0073, 0xfff6fff5, + 0xfff3fff7, 0x00700022, 0x00270073, 0xfff6fff4, + 0xfff3fff7, 0x00710025, 0x00250071, 0xfff7fff3, + 0xfff4fff6, 0x00730027, 0x00220070, 0xfff7fff3, + 0xfff5fff6, 0x0073002a, 0x0020006d, 0xfff9fff2, + 0xfff5fff5, 0x0075002c, 0x001e006c, 0xfff9fff2, + 0xfff6fff5, 0x0076002f, 0x001b006a, 0xfff9fff2, + 0xfff7fff4, 0x00770031, 0x00190068, 0xfffbfff1, + 0xfff8fff4, 0x00780034, 0x00170066, 0xfffafff1, + 0xfff9fff3, 0x00790036, 0x00150064, 0xfffbfff1, + 0xfffafff3, 0x00790039, 0x00130061, 0xfffcfff1, + 0xfffbfff3, 0x0079003b, 0x0011005f, 0xfffdfff1, + 0xfffcfff2, 0x007a003e, 0x000f005d, 0xfffdfff1, + 0xfffdfff2, 0x007a0040, 0x000d005a, 0xfffffff1, + 0xfffefff2, 0x007b0043, 0x000b0058, 0xfffefff1, + 0x0000fff1, 0x007a0046, 0x00090055, 0x0000fff1, + 0x0001fff1, 0x007b0048, 0x00070053, 0x0000fff1, + 0x0002fff1, 0x007b004b, 0x00060050, 0x0000fff1 +}; + +static const u32 coef_lut_e_c_legacy[NB_COEF] = { + 0xfffa0001, 0x003f0010, 0x003f006d, 0xfffa0010, + 0xfffb0002, 0x00440011, 0x00440062, 0xfffa000e, + 0xfffb0001, 0x00460013, 0x00420062, 0xfffa000d, + 0xfffb0000, 0x00480014, 0x00410062, 0xfffa000c, + 0xfffb0001, 0x00490015, 0x003f0061, 0xfffb000b, + 0xfffb0000, 0x004b0017, 0x003d0061, 0xfffb000a, + 0xfffb0000, 0x004d0018, 0x003b0062, 0xfffb0008, + 0xfffcffff, 0x004f001a, 0x00390061, 0xfffb0007, + 0xfffc0000, 0x004f001c, 0x00380060, 0xfffb0006, + 0xfffcffff, 0x0052001d, 0x00360060, 0xfffb0005, + 0xfffdfffe, 0x0053001f, 0x00340060, 0xfffb0004, + 0xfffdfffe, 0x00540021, 0x0032005e, 0xfffc0004, + 0xfffeffff, 0x00550022, 0x0030005d, 0xfffc0003, + 0xfffeffff, 0x00560024, 0x002f005c, 0xfffc0002, + 0xfffffffd, 0x00580026, 0x002d005c, 0xfffc0001, + 0xfffffffd, 0x005a0027, 0x002b005c, 0xfffc0000, + 0x0000fffd, 0x005a0029, 0x0029005a, 0xfffd0000, + 0x0000fffc, 0x005c002b, 0x0027005a, 0xfffdffff, + 0x0001fffc, 0x005c002d, 0x00260058, 0xfffdffff, + 0x0002fffc, 0x005c002f, 0x00240056, 0xfffffffe, + 0x0003fffc, 0x005d0030, 0x00220055, 0xfffffffe, + 0x0004fffc, 0x005e0032, 0x00210054, 0xfffefffd, + 0x0004fffb, 0x00600034, 0x001f0053, 0xfffefffd, + 0x0005fffb, 0x00600036, 0x001d0052, 0xfffffffc, + 0x0006fffb, 0x00600038, 0x001c004f, 0x0000fffc, + 0x0007fffb, 0x00610039, 0x001a004f, 0xfffffffc, + 0x0008fffb, 0x0062003b, 0x0018004d, 0x0000fffb, + 0x000afffb, 0x0061003d, 0x0017004b, 0x0000fffb, + 0x000bfffb, 0x0061003f, 0x00150049, 0x0001fffb, + 0x000cfffa, 0x00620041, 0x00140048, 0x0000fffb, + 0x000dfffa, 0x00620042, 0x00130046, 0x0001fffb, + 0x000efffa, 0x00620044, 
0x00110044, 0x0002fffb +}; + +static const u32 coef_lut_f_y_legacy[NB_COEF] = { + 0xfff6fff0, 0x00490012, 0x0049006e, 0xfff60012, + 0xfff7fff1, 0x004e0013, 0x00490068, 0xfff60010, + 0xfff7fff2, 0x004f0015, 0x00470067, 0xfff6000f, + 0xfff7fff5, 0x004f0017, 0x00450065, 0xfff6000e, + 0xfff8fff5, 0x00500018, 0x00440065, 0xfff6000c, + 0xfff8fff6, 0x0051001a, 0x00420064, 0xfff6000b, + 0xfff8fff6, 0x0052001c, 0x00400064, 0xfff6000a, + 0xfff9fff6, 0x0054001d, 0x003e0064, 0xfff60008, + 0xfff9fff8, 0x0054001f, 0x003c0063, 0xfff60007, + 0xfffafff8, 0x00550021, 0x003a0062, 0xfff60006, + 0xfffbfff7, 0x00560022, 0x00390062, 0xfff60005, + 0xfffbfff8, 0x00570024, 0x00370061, 0xfff60004, + 0xfffcfff8, 0x00580026, 0x00350060, 0xfff60003, + 0xfffdfff8, 0x00590028, 0x0033005f, 0xfff60002, + 0xfffdfff7, 0x005b002a, 0x0031005f, 0xfff60001, + 0xfffefff7, 0x005c002c, 0x002f005e, 0xfff60000, + 0xfffffff6, 0x005e002d, 0x002d005e, 0xfff6ffff, + 0x0000fff6, 0x005e002f, 0x002c005c, 0xfff7fffe, + 0x0001fff6, 0x005f0031, 0x002a005b, 0xfff7fffd, + 0x0002fff6, 0x005f0033, 0x00280059, 0xfff8fffd, + 0x0003fff6, 0x00600035, 0x00260058, 0xfff8fffc, + 0x0004fff6, 0x00610037, 0x00240057, 0xfff8fffb, + 0x0005fff6, 0x00620039, 0x00220056, 0xfff7fffb, + 0x0006fff6, 0x0062003a, 0x00210055, 0xfff8fffa, + 0x0007fff6, 0x0063003c, 0x001f0054, 0xfff8fff9, + 0x0008fff6, 0x0064003e, 0x001d0054, 0xfff6fff9, + 0x000afff6, 0x00640040, 0x001c0052, 0xfff6fff8, + 0x000bfff6, 0x00640042, 0x001a0051, 0xfff6fff8, + 0x000cfff6, 0x00650044, 0x00180050, 0xfff5fff8, + 0x000efff6, 0x00650045, 0x0017004f, 0xfff5fff7, + 0x000ffff6, 0x00670047, 0x0015004f, 0xfff2fff7, + 0x0010fff6, 0x00680049, 0x0013004e, 0xfff1fff7 +}; + +static const u32 coef_lut_f_c_legacy[NB_COEF] = { + 0x0000fffb, 0x003a001a, 0x003a005d, 0x0000001a, + 0x0001fffb, 0x003f001b, 0x00400051, 0x00000019, + 0x0001fffc, 0x0040001c, 0x003f0051, 0x00000017, + 0x0002fffb, 0x0042001d, 0x003e0051, 0xffff0016, + 0x0002fffb, 0x0043001e, 0x003d0051, 0xffff0015, + 0x0003fffc, 0x00430020, 0x003b0050, 0xffff0014, + 0x0003fffb, 0x00450021, 0x003a0051, 0xfffe0013, + 0x0004fffc, 0x00450022, 0x00390050, 0xfffe0012, + 0x0005fffc, 0x00460023, 0x0038004f, 0xfffe0011, + 0x0005fffb, 0x00480025, 0x00360050, 0xfffd0010, + 0x0006fffc, 0x00480026, 0x0035004f, 0xfffd000f, + 0x0006fffc, 0x00490027, 0x0034004f, 0xfffd000e, + 0x0007fffd, 0x00490028, 0x0033004e, 0xfffd000d, + 0x0008fffc, 0x004a002a, 0x0031004d, 0xfffd000d, + 0x0009fffd, 0x004a002b, 0x0030004d, 0xfffc000c, + 0x0009fffc, 0x004c002c, 0x002f004d, 0xfffc000b, + 0x000afffc, 0x004c002e, 0x002e004c, 0xfffc000a, + 0x000bfffc, 0x004d002f, 0x002c004c, 0xfffc0009, + 0x000cfffc, 0x004d0030, 0x002b004a, 0xfffd0009, + 0x000dfffd, 0x004d0031, 0x002a004a, 0xfffc0008, + 0x000dfffd, 0x004e0033, 0x00280049, 0xfffd0007, + 0x000efffd, 0x004f0034, 0x00270049, 0xfffc0006, + 0x000ffffd, 0x004f0035, 0x00260048, 0xfffc0006, + 0x0010fffd, 0x00500036, 0x00250048, 0xfffb0005, + 0x0011fffe, 0x004f0038, 0x00230046, 0xfffc0005, + 0x0012fffe, 0x00500039, 0x00220045, 0xfffc0004, + 0x0013fffe, 0x0051003a, 0x00210045, 0xfffb0003, + 0x0014ffff, 0x0050003b, 0x00200043, 0xfffc0003, + 0x0015ffff, 0x0051003d, 0x001e0043, 0xfffb0002, + 0x0016ffff, 0x0051003e, 0x001d0042, 0xfffb0002, + 0x00170000, 0x0051003f, 0x001c0040, 0xfffc0001, + 0x00190000, 0x00510040, 0x001b003f, 0xfffb0001 +}; + +#endif diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c index 06a587c4f1bb..899104f9d4bc 100644 --- a/drivers/gpu/drm/sti/sti_layer.c +++ b/drivers/gpu/drm/sti/sti_layer.c @@ -11,7 
+11,9 @@ #include <drm/drm_fb_cma_helper.h> #include "sti_compositor.h" +#include "sti_cursor.h" #include "sti_gdp.h" +#include "sti_hqvdp.h" #include "sti_layer.h" #include "sti_vid.h" @@ -32,10 +34,13 @@ const char *sti_layer_to_str(struct sti_layer *layer) return "VID1"; case STI_CURSOR: return "CURSOR"; + case STI_HQVDP_0: + return "HQVDP0"; default: return "<UNKNOWN LAYER>"; } } +EXPORT_SYMBOL(sti_layer_to_str); struct sti_layer *sti_layer_create(struct device *dev, int desc, void __iomem *baseaddr) @@ -50,6 +55,12 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc, case STI_VID: layer = sti_vid_create(dev); break; + case STI_CUR: + layer = sti_cursor_create(dev); + break; + case STI_VDP: + layer = sti_hqvdp_create(dev); + break; } if (!layer) { @@ -67,8 +78,11 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc, return layer; } +EXPORT_SYMBOL(sti_layer_create); -int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, +int sti_layer_prepare(struct sti_layer *layer, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_display_mode *mode, int mixer_id, int dest_x, int dest_y, int dest_w, int dest_h, int src_x, int src_y, int src_w, int src_h) @@ -88,6 +102,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, return 1; } + layer->crtc = crtc; layer->fb = fb; layer->mode = mode; layer->mixer_id = mixer_id; @@ -100,6 +115,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, layer->src_w = src_w; layer->src_h = src_h; layer->format = fb->pixel_format; + layer->vaddr = cma_obj->vaddr; layer->paddr = cma_obj->paddr; for (i = 0; i < 4; i++) { layer->pitches[i] = fb->pitches[i]; diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h index 198c3774cc12..ceff497f557e 100644 --- a/drivers/gpu/drm/sti/sti_layer.h +++ b/drivers/gpu/drm/sti/sti_layer.h @@ -22,7 +22,8 @@ enum sti_layer_type { STI_GDP = 1 << STI_LAYER_TYPE_SHIFT, STI_VID = 2 << STI_LAYER_TYPE_SHIFT, STI_CUR = 3 << STI_LAYER_TYPE_SHIFT, - STI_BCK = 4 << STI_LAYER_TYPE_SHIFT + STI_BCK = 4 << STI_LAYER_TYPE_SHIFT, + STI_VDP = 5 << STI_LAYER_TYPE_SHIFT }; enum sti_layer_id_of_type { @@ -39,6 +40,7 @@ enum sti_layer_desc { STI_GDP_3 = STI_GDP | STI_ID_3, STI_VID_0 = STI_VID | STI_ID_0, STI_VID_1 = STI_VID | STI_ID_1, + STI_HQVDP_0 = STI_VDP | STI_ID_0, STI_CURSOR = STI_CUR, STI_BACK = STI_BCK }; @@ -67,6 +69,7 @@ struct sti_layer_funcs { * * @plane: drm plane it is bound to (if any) * @fb: drm fb it is bound to + * @crtc: crtc it is bound to * @mode: display mode * @desc: layer type & id * @device: driver device @@ -82,11 +85,13 @@ struct sti_layer_funcs { * @format: format * @pitches: pitch of 'planes' (eg: Y, U, V) * @offsets: offset of 'planes' + * @vaddr: virtual address of the input buffer * @paddr: physical address of the input buffer */ struct sti_layer { struct drm_plane plane; struct drm_framebuffer *fb; + struct drm_crtc *crtc; struct drm_display_mode *mode; enum sti_layer_desc desc; struct device *dev; @@ -102,12 +107,15 @@ struct sti_layer { uint32_t format; unsigned int pitches[4]; unsigned int offsets[4]; + void *vaddr; dma_addr_t paddr; }; struct sti_layer *sti_layer_create(struct device *dev, int desc, void __iomem *baseaddr); -int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, +int sti_layer_prepare(struct sti_layer *layer, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_display_mode *mode, int mixer_id, int dest_x, int dest_y, diff --git 
a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index 79f369db9fb6..13a4b84deab6 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -45,6 +45,7 @@ static const u32 mixerColorSpaceMatIdentity[] = { #define GAM_CTL_GDP1_MASK BIT(4) #define GAM_CTL_GDP2_MASK BIT(5) #define GAM_CTL_GDP3_MASK BIT(6) +#define GAM_CTL_CURSOR_MASK BIT(9) const char *sti_mixer_to_str(struct sti_mixer *mixer) { @@ -122,11 +123,15 @@ int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) layer_id = GAM_DEPTH_GDP3_ID; break; case STI_VID_0: + case STI_HQVDP_0: layer_id = GAM_DEPTH_VID0_ID; break; case STI_VID_1: layer_id = GAM_DEPTH_VID1_ID; break; + case STI_CURSOR: + /* no need to set depth for cursor */ + return 0; default: DRM_ERROR("Unknown layer %d\n", layer->desc); return 1; @@ -185,9 +190,12 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) case STI_GDP_3: return GAM_CTL_GDP3_MASK; case STI_VID_0: + case STI_HQVDP_0: return GAM_CTL_VID0_MASK; case STI_VID_1: return GAM_CTL_VID1_MASK; + case STI_CURSOR: + return GAM_CTL_CURSOR_MASK; default: return 0; } @@ -215,6 +223,15 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer, return 0; } +void sti_mixer_clear_all_layers(struct sti_mixer *mixer) +{ + u32 val; + + DRM_DEBUG_DRIVER("%s clear all layers\n", sti_mixer_to_str(mixer)); + val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000; + sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); +} + void sti_mixer_set_matrix(struct sti_mixer *mixer) { unsigned int i; diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index 874372102e52..b97282182908 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -23,6 +23,7 @@ * @id: id of the mixer * @drm_crtc: crtc object linked to the mixer * @pending_event: set if a flip event is pending on crtc + * @enabled: true if the mixer is active */ struct sti_mixer { struct device *dev; @@ -30,6 +31,7 @@ struct sti_mixer { int id; struct drm_crtc drm_crtc; struct drm_pending_vblank_event *pending_event; + bool enabled; }; const char *sti_mixer_to_str(struct sti_mixer *mixer); @@ -39,6 +41,7 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id, int sti_mixer_set_layer_status(struct sti_mixer *mixer, struct sti_layer *layer, bool status); +void sti_mixer_clear_all_layers(struct sti_mixer *mixer); int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); int sti_mixer_active_video_area(struct sti_mixer *mixer, struct drm_display_mode *mode); diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index b8afe490356a..cb924aa2b321 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -16,6 +16,8 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include "sti_drm_crtc.h" + /* glue registers */ #define TVO_CSC_MAIN_M0 0x000 #define TVO_CSC_MAIN_M1 0x004 @@ -96,7 +98,7 @@ #define TVO_SYNC_HD_DCS_SHIFT 8 -#define ENCODER_MAIN_CRTC_MASK BIT(0) +#define ENCODER_CRTC_MASK (BIT(0) | BIT(1)) /* enum listing the supported output data format */ enum sti_tvout_video_out_type { @@ -149,14 +151,15 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset) * Set the color order of a VIP * * @tvout: tvout structure + * @reg: register to set * @cr_r: Cr/R channel order select * @y_g: Y/G channel order select * @cb_b: Cb/B channel order select */ -static void tvout_vip_set_color_order(struct sti_tvout *tvout, +static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg, u32 cr_r, u32 y_g, u32
cb_b) { - u32 val = tvout_read(tvout, TVO_VIP_HDMI); + u32 val = tvout_read(tvout, reg); val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT); val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT); @@ -165,52 +168,58 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout, val |= y_g << TVO_VIP_REORDER_G_SHIFT; val |= cb_b << TVO_VIP_REORDER_B_SHIFT; - tvout_write(tvout, val, TVO_VIP_HDMI); + tvout_write(tvout, val, reg); } /** * Set the clipping mode of a VIP * * @tvout: tvout structure + * @reg: register to set * @range: clipping range */ -static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range) +static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range) { - u32 val = tvout_read(tvout, TVO_VIP_HDMI); + u32 val = tvout_read(tvout, reg); val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT); val |= range << TVO_VIP_CLIP_SHIFT; - tvout_write(tvout, val, TVO_VIP_HDMI); + tvout_write(tvout, val, reg); } /** * Set the rounding mode of a VIP * * @tvout: tvout structure + * @reg: register to set * @rnd: rounding applied to each component */ -static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd) +static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd) { - u32 val = tvout_read(tvout, TVO_VIP_HDMI); + u32 val = tvout_read(tvout, reg); val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT); val |= rnd << TVO_VIP_RND_SHIFT; - tvout_write(tvout, val, TVO_VIP_HDMI); + tvout_write(tvout, val, reg); } /** * Select the VIP input * * @tvout: tvout structure + * @reg: register to set + * @main_path: main or auxiliary path + * @sel_input_logic_inverted: true if the input selection logic is inverted * @video_out: selected output type (main/aux + conv) */ static void tvout_vip_set_sel_input(struct sti_tvout *tvout, + int reg, bool main_path, bool sel_input_logic_inverted, enum sti_tvout_video_out_type video_out) { u32 sel_input; - u32 val = tvout_read(tvout, TVO_VIP_HDMI); + u32 val = tvout_read(tvout, reg); if (main_path) sel_input = TVO_VIP_SEL_INPUT_MAIN; @@ -232,22 +241,24 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout, val &= ~TVO_VIP_SEL_INPUT_MASK; val |= sel_input; - tvout_write(tvout, val, TVO_VIP_HDMI); + tvout_write(tvout, val, reg); } /** * Select signed or unsigned input video * * @tvout: tvout structure + * @reg: register to set * @in_vid_fmt: input video format (signed or unsigned) */ -static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt) +static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, + int reg, u32 in_vid_fmt) { - u32 val = tvout_read(tvout, TVO_VIP_HDMI); + u32 val = tvout_read(tvout, reg); val &= ~TVO_IN_FMT_SIGNED; val |= in_vid_fmt; - tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT); + tvout_write(tvout, val, reg); } /** @@ -261,6 +272,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path) { struct device_node *node = tvout->dev->of_node; bool sel_input_logic_inverted = false; + u32 tvo_in_vid_format; dev_dbg(tvout->dev, "%s\n", __func__); @@ -268,33 +280,36 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path) DRM_DEBUG_DRIVER("main vip for hdmi\n"); /* select the input sync for hdmi = VTG set 1 */ tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL); + tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT; } else { DRM_DEBUG_DRIVER("aux vip for hdmi\n"); /* select the input sync for hdmi = VTG set 1 */ tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL); + tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT; } /* set color channel order */ -
tvout_vip_set_color_order(tvout, + tvout_vip_set_color_order(tvout, TVO_VIP_HDMI, TVO_VIP_REORDER_CR_R_SEL, TVO_VIP_REORDER_Y_G_SEL, TVO_VIP_REORDER_CB_B_SEL); /* set clipping mode (Limited range RGB/Y) */ - tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y); + tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI, + TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y); /* set round mode (rounded to 8-bit per component) */ - tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED); + tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED); if (of_device_is_compatible(node, "st,stih407-tvout")) { /* set input video format */ - tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT, - TVO_IN_FMT_SIGNED); + tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, + TVO_IN_FMT_SIGNED); sel_input_logic_inverted = true; } /* input selection */ - tvout_vip_set_sel_input(tvout, main_path, + tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path, sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB); } @@ -309,48 +324,47 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path) { struct device_node *node = tvout->dev->of_node; bool sel_input_logic_inverted = false; + u32 tvo_in_vid_format; + int val; dev_dbg(tvout->dev, "%s\n", __func__); - if (!main_path) { - DRM_ERROR("HD Analog on aux not implemented\n"); - return; + if (main_path) { + val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT; + val |= TVO_SYNC_MAIN_VTG_SET_3; + tvout_write(tvout, val, TVO_HD_SYNC_SEL); + tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT; + } else { + val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT; + val |= TVO_SYNC_AUX_VTG_SET_3; + tvout_write(tvout, val, TVO_HD_SYNC_SEL); + tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT; } - DRM_DEBUG_DRIVER("main vip for HDF\n"); - /* set color channel order */ - tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF, + tvout_vip_set_color_order(tvout, TVO_VIP_HDF, TVO_VIP_REORDER_CR_R_SEL, TVO_VIP_REORDER_Y_G_SEL, TVO_VIP_REORDER_CB_B_SEL); - /* set clipping mode (Limited range RGB/Y) */ - tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF, - TVO_VIP_CLIP_LIMITED_RANGE_CB_CR); + /* set clipping mode (EAV/SAV clipping) */ + tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV); /* set round mode (rounded to 10-bit per component) */ - tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED); + tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED); if (of_device_is_compatible(node, "st,stih407-tvout")) { /* set input video format */ - tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED); + tvout_vip_set_in_vid_fmt(tvout, + tvo_in_vid_format, TVO_IN_FMT_SIGNED); sel_input_logic_inverted = true; } /* Input selection */ - tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF, - main_path, + tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path, sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_YUV); - /* select the input sync for HD analog = VTG set 3 - * and HD DCS = VTG set 2 */ - tvout_write(tvout, - (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT) - | TVO_SYNC_MAIN_VTG_SET_3, - TVO_HD_SYNC_SEL); - /* power up HD DAC */ tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF); } @@ -392,7 +406,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - tvout_hda_start(tvout, true); + tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); } static void sti_hda_encoder_disable(struct drm_encoder *encoder) @@ -429,7 +443,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev, 
drm_encoder = (struct drm_encoder *) encoder; - drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; + drm_encoder->possible_crtcs = ENCODER_CRTC_MASK; drm_encoder->possible_clones = 1 << 0; drm_encoder_init(dev, drm_encoder, @@ -444,7 +458,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder) { struct sti_tvout *tvout = to_sti_tvout(encoder); - tvout_hdmi_start(tvout, true); + tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); } static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) @@ -478,7 +492,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev, drm_encoder = (struct drm_encoder *) encoder; - drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; + drm_encoder->possible_crtcs = ENCODER_CRTC_MASK; drm_encoder->possible_clones = 1 << 1; drm_encoder_init(dev, drm_encoder, diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 740d6e347a62..9564f2568e2c 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -51,10 +51,19 @@ #define VTG_TOP_V_HD_3 0x010C #define VTG_BOT_V_HD_3 0x0110 +#define VTG_H_HD_4 0x0120 +#define VTG_TOP_V_VD_4 0x0124 +#define VTG_BOT_V_VD_4 0x0128 +#define VTG_TOP_V_HD_4 0x012c +#define VTG_BOT_V_HD_4 0x0130 + #define VTG_IRQ_BOTTOM BIT(0) #define VTG_IRQ_TOP BIT(1) #define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM) +/* Delay introduced by the HDMI in nb of pixel */ +#define HDMI_DELAY (6) + /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ #define AWG_DELAY_HD (-9) #define AWG_DELAY_ED (-8) @@ -133,10 +142,10 @@ static void vtg_set_mode(struct sti_vtg *vtg, writel(tmp, vtg->regs + VTG_VID_TFS); writel(tmp, vtg->regs + VTG_VID_BFS); - /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */ tmp = (mode->hsync_end - mode->hsync_start) << 16; + /* prepare VTG set 1 for HDMI */ + tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16; + tmp |= HDMI_DELAY; writel(tmp, vtg->regs + VTG_H_HD_1); - writel(tmp, vtg->regs + VTG_H_HD_2); tmp = (mode->vsync_end - mode->vsync_start + 1) << 16; tmp |= 1; @@ -146,6 +155,11 @@ writel(0, vtg->regs + VTG_BOT_V_HD_1); /* prepare VTG set 2 for HD DCS */ + tmp = (mode->hsync_end - mode->hsync_start) << 16; + writel(tmp, vtg->regs + VTG_H_HD_2); + + tmp = (mode->vsync_end - mode->vsync_start + 1) << 16; + tmp |= 1; writel(tmp, vtg->regs + VTG_TOP_V_VD_2); writel(tmp, vtg->regs + VTG_BOT_V_VD_2); writel(0, vtg->regs + VTG_TOP_V_HD_2); @@ -166,6 +180,17 @@ writel(tmp, vtg->regs + VTG_TOP_V_HD_3); writel(tmp, vtg->regs + VTG_BOT_V_HD_3); + /* Prepare VTG set 4 for DVO */ + tmp = (mode->hsync_end - mode->hsync_start) << 16; + writel(tmp, vtg->regs + VTG_H_HD_4); + + tmp = (mode->vsync_end - mode->vsync_start + 1) << 16; + tmp |= 1; + writel(tmp, vtg->regs + VTG_TOP_V_VD_4); + writel(tmp, vtg->regs + VTG_BOT_V_VD_4); + writel(0, vtg->regs + VTG_TOP_V_HD_4); + writel(0, vtg->regs + VTG_BOT_V_HD_4); + /* mode */ writel(type, vtg->regs + VTG_MODE); } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index b957908aec73..978993fa3a36 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, const struct tegra_dc_window *window) { unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; - unsigned long value; + unsigned long value, flags; bool yuv, planar;
/* @@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, else bpp = planar ? 1 : 2; + spin_lock_irqsave(&dc->lock, flags); + value = WINDOW_A_SELECT << index; tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); @@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, case TEGRA_BO_TILING_MODE_BLOCK: DRM_ERROR("hardware doesn't support block linear mode\n"); + spin_unlock_irqrestore(&dc->lock, flags); return -EINVAL; } @@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, tegra_dc_window_commit(dc, index); + spin_unlock_irqrestore(&dc->lock, flags); + return 0; } @@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane) { struct tegra_dc *dc = to_tegra_dc(plane->crtc); struct tegra_plane *p = to_tegra_plane(plane); + unsigned long flags; u32 value; if (!plane->crtc) return 0; + spin_lock_irqsave(&dc->lock, flags); + value = WINDOW_A_SELECT << p->index; tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); @@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane) tegra_dc_window_commit(dc, p->index); + spin_unlock_irqrestore(&dc->lock, flags); + return 0; } @@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); unsigned int h_offset = 0, v_offset = 0; struct tegra_bo_tiling tiling; + unsigned long value, flags; unsigned int format, swap; - unsigned long value; int err; err = tegra_fb_get_tiling(fb, &tiling); if (err < 0) return err; + spin_lock_irqsave(&dc->lock, flags); + tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); value = fb->offsets[0] + y * fb->pitches[0] + @@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, case TEGRA_BO_TILING_MODE_BLOCK: DRM_ERROR("hardware doesn't support block linear mode\n"); + spin_unlock_irqrestore(&dc->lock, flags); return -EINVAL; } @@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); + spin_unlock_irqrestore(&dc->lock, flags); + return 0; } @@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) unsigned long flags, base; struct tegra_bo *bo; - if (!dc->event) + spin_lock_irqsave(&drm->event_lock, flags); + + if (!dc->event) { + spin_unlock_irqrestore(&drm->event_lock, flags); return; + } bo = tegra_fb_get_plane(crtc->primary->fb, 0); + spin_lock_irqsave(&dc->lock, flags); + /* check if new start address has been latched */ + tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); + spin_unlock_irqrestore(&dc->lock, flags); + if (base == bo->paddr + crtc->primary->fb->offsets[0]) { - spin_lock_irqsave(&drm->event_lock, flags); - drm_send_vblank_event(drm, dc->pipe, dc->event); - drm_vblank_put(drm, dc->pipe); + drm_crtc_send_vblank_event(crtc, dc->event); + drm_crtc_vblank_put(crtc); dc->event = NULL; - spin_unlock_irqrestore(&drm->event_lock, flags); } + + spin_unlock_irqrestore(&drm->event_lock, flags); } void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) @@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) if (dc->event && dc->event->base.file_priv == file) { 
dc->event->base.destroy(&dc->event->base); - drm_vblank_put(drm, dc->pipe); + drm_crtc_vblank_put(crtc); dc->event = NULL; } @@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags) { + unsigned int pipe = drm_crtc_index(crtc); struct tegra_dc *dc = to_tegra_dc(crtc); - struct drm_device *drm = crtc->dev; if (dc->event) return -EBUSY; if (event) { - event->pipe = dc->pipe; + event->pipe = pipe; dc->event = event; - drm_vblank_get(drm, dc->pipe); + drm_crtc_vblank_get(crtc); } tegra_dc_set_base(dc, 0, 0, fb); @@ -906,7 +930,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) } } - drm_vblank_off(drm, dc->pipe); + drm_crtc_vblank_off(crtc); tegra_dc_commit(dc); } @@ -996,8 +1020,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc, u32 value; int err; - drm_vblank_pre_modeset(crtc->dev, dc->pipe); - err = tegra_crtc_setup_clk(crtc, mode); if (err) { dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); @@ -1051,6 +1073,8 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) unsigned int syncpt; unsigned long value; + drm_crtc_vblank_off(crtc); + /* hardware initialization */ reset_control_deassert(dc->rst); usleep_range(10000, 20000); @@ -1091,7 +1115,7 @@ static void tegra_crtc_commit(struct drm_crtc *crtc) { struct tegra_dc *dc = to_tegra_dc(crtc); - drm_vblank_post_modeset(crtc->dev, dc->pipe); + drm_crtc_vblank_on(crtc); tegra_dc_commit(dc); } @@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data) /* dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); */ - drm_handle_vblank(dc->base.dev, dc->pipe); + drm_crtc_handle_vblank(&dc->base); tegra_dc_finish_page_flip(dc); } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index e549afeece1f..d4f827593dfa 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = { .llseek = noop_llseek, }; -static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe) +static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, + unsigned int pipe) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) { - struct tegra_dc *dc = to_tegra_dc(crtc); - - if (dc->pipe == pipe) + if (pipe == drm_crtc_index(crtc)) return crtc; } return NULL; } -static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc) +static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe) { + struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + + if (!crtc) + return 0; + /* TODO: implement real hardware counter using syncpoints */ - return drm_vblank_count(dev, crtc); + return drm_crtc_vblank_count(crtc); } static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index da32086cbeaf..8777b7f75791 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) } } -static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo, - size_t size) +static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) { + struct scatterlist *s; + struct sg_table *sgt; + unsigned int i; + bo->pages = drm_gem_get_pages(&bo->gem); if (IS_ERR(bo->pages)) return 
PTR_ERR(bo->pages); - bo->num_pages = size >> PAGE_SHIFT; - - bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); - if (IS_ERR(bo->sgt)) { - drm_gem_put_pages(&bo->gem, bo->pages, false, false); - return PTR_ERR(bo->sgt); + bo->num_pages = bo->gem.size >> PAGE_SHIFT; + + sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); + if (IS_ERR(sgt)) + goto put_pages; + + /* + * Fake up the SG table so that dma_map_sg() can be used to flush the + * pages associated with it. Note that this relies on the fact that + * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is + * only cache maintenance. + * + * TODO: Replace this by drm_clflush_sg() once it can be implemented + * without relying on symbols that are not exported. + */ + for_each_sg(sgt->sgl, s, sgt->nents, i) + sg_dma_address(s) = sg_phys(s); + + if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) { + sgt = ERR_PTR(-ENOMEM); + goto release_sgt; } + bo->sgt = sgt; + return 0; + +release_sgt: + sg_free_table(sgt); + kfree(sgt); +put_pages: + drm_gem_put_pages(&bo->gem, bo->pages, false, false); + return PTR_ERR(sgt); } -static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo, - size_t size) +static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) { struct tegra_drm *tegra = drm->dev_private; int err; if (tegra->domain) { - err = tegra_bo_get_pages(drm, bo, size); + err = tegra_bo_get_pages(drm, bo); if (err < 0) return err; @@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo, return err; } } else { + size_t size = bo->gem.size; + bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { @@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, if (IS_ERR(bo)) return bo; - err = tegra_bo_alloc(drm, bo, size); + err = tegra_bo_alloc(drm, bo); if (err < 0) goto release; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index d56d3f8b8d6b..095fca91525c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -644,7 +644,6 @@ static struct platform_driver tilcdc_platform_driver = { .probe = tilcdc_pdev_probe, .remove = tilcdc_pdev_remove, .driver = { - .owner = THIS_MODULE, .name = "tilcdc", .pm = &tilcdc_pm_ops, .of_match_table = tilcdc_of_match, diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 8ce508e76208..3820ae97a030 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); */ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list, bool intr) + struct list_head *list, bool intr, + struct list_head *dups) { struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; @@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, __ttm_bo_unreserve(bo); ret = -EBUSY; + + } else if (ret == -EALREADY && dups) { + struct ttm_validate_buffer *safe = entry; + entry = list_prev_entry(entry, head); + list_del(&safe->head); + list_add(&safe->head, dups); + continue; } if (!ret) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index db7621828bc7..7b5d22110f25 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1062,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, vmaster =
vmw_master_check(dev, file_priv, flags); if (unlikely(IS_ERR(vmaster))) { - DRM_INFO("IOCTL ERROR %d\n", nr); - return PTR_ERR(vmaster); + ret = PTR_ERR(vmaster); + + if (ret != -ERESTARTSYS) + DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", + nr, ret); + return ret; } ret = ioctl_func(filp, cmd, arg); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 596cd6dafd33..33176d05db35 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (unlikely(ret != 0)) goto out_err_nores; - ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); + ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, + true, NULL); if (unlikely(ret != 0)) goto out_err; @@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, query_val.shared = false; list_add_tail(&query_val.head, &validate_list); - ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false); + ret = ttm_eu_reserve_buffers(&ticket, &validate_list, + false, NULL); if (unlikely(ret != 0)) { vmw_execbuf_unpin_panic(dev_priv); goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 197164fd7803..b7594cb758af 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) static void vmw_fence_destroy(struct vmw_fence_obj *fence) { - struct vmw_fence_manager *fman = fman_from_fence(fence); - fence_free(&fence->base); - - /* - * Free kernel space accounting. - */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->fence_size); } int vmw_fence_create(struct vmw_fence_manager *fman, uint32_t seqno, struct vmw_fence_obj **p_fence) { - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); struct vmw_fence_obj *fence; int ret; - ret = ttm_mem_global_alloc(mem_glob, fman->fence_size, - false, false); - if (unlikely(ret != 0)) - return ret; - fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) { - ret = -ENOMEM; - goto out_no_object; - } + if (unlikely(fence == NULL)) + return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy); @@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman, out_err_init: kfree(fence); -out_no_object: - ttm_mem_global_free(mem_glob, fman->fence_size); return ret; } @@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, if (ret != 0) goto out_no_queue; + return 0; + out_no_queue: event->base.destroy(&event->base); out_no_event: @@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, BUG_ON(fence == NULL); - if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - else - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - + ret = vmw_event_fence_action_create(file_priv, fence, + arg->flags, + arg->user_data, + true); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to attach event to fence.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 026de7cea0f6..210ef15b1d09 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1222,7 +1222,7 @@ 
vmw_resource_check_buffer(struct vmw_resource *res, val_buf->bo = ttm_bo_reference(&res->backup->base); val_buf->shared = false; list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible); + ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL); if (unlikely(ret != 0)) goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 8719fb3cccc9..6a4584a43aa6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res, cmd->header.size = sizeof(cmd->body); cmd->body.shid = res->id; cmd->body.mobid = bo->mem.start; - cmd->body.offsetInBytes = 0; + cmd->body.offsetInBytes = res->backup_offset; res->backup_dirty = false; vmw_fifo_commit(dev_priv, sizeof(*cmd)); diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index d6f56471bd2a..752cdd2da89a 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -227,83 +227,83 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk, static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) { switch (mbus_code) { - case V4L2_MBUS_FMT_BGR565_2X8_BE: - case V4L2_MBUS_FMT_BGR565_2X8_LE: - case V4L2_MBUS_FMT_RGB565_2X8_BE: - case V4L2_MBUS_FMT_RGB565_2X8_LE: + case MEDIA_BUS_FMT_BGR565_2X8_BE: + case MEDIA_BUS_FMT_BGR565_2X8_LE: + case MEDIA_BUS_FMT_RGB565_2X8_BE: + case MEDIA_BUS_FMT_RGB565_2X8_LE: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565; cfg->mipi_dt = MIPI_DT_RGB565; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE: - case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE: + case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE: + case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444; cfg->mipi_dt = MIPI_DT_RGB444; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: - case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: + case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE: + case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555; cfg->mipi_dt = MIPI_DT_RGB555; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_UYVY8_2X8: + case MEDIA_BUS_FMT_UYVY8_2X8: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; cfg->mipi_dt = MIPI_DT_YUV422; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_YUYV8_2X8: + case MEDIA_BUS_FMT_YUYV8_2X8: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; cfg->mipi_dt = MIPI_DT_YUV422; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_UYVY8_1X16: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; cfg->mipi_dt = MIPI_DT_YUV422; cfg->data_width = IPU_CSI_DATA_WIDTH_16; break; - case V4L2_MBUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; cfg->mipi_dt = MIPI_DT_YUV422; cfg->data_width = IPU_CSI_DATA_WIDTH_16; break; - case V4L2_MBUS_FMT_SBGGR8_1X8: - case V4L2_MBUS_FMT_SGBRG8_1X8: - case V4L2_MBUS_FMT_SGRBG8_1X8: - case V4L2_MBUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->mipi_dt = MIPI_DT_RAW8; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8: - case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8: - case 
V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: - case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8: - case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: - case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: - case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: - case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE: + case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE: + case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE: + case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->mipi_dt = MIPI_DT_RAW10; cfg->data_width = IPU_CSI_DATA_WIDTH_8; break; - case V4L2_MBUS_FMT_SBGGR10_1X10: - case V4L2_MBUS_FMT_SGBRG10_1X10: - case V4L2_MBUS_FMT_SGRBG10_1X10: - case V4L2_MBUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->mipi_dt = MIPI_DT_RAW10; cfg->data_width = IPU_CSI_DATA_WIDTH_10; break; - case V4L2_MBUS_FMT_SBGGR12_1X12: - case V4L2_MBUS_FMT_SGBRG12_1X12: - case V4L2_MBUS_FMT_SGRBG12_1X12: - case V4L2_MBUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; cfg->mipi_dt = MIPI_DT_RAW12; cfg->data_width = IPU_CSI_DATA_WIDTH_12; break; - case V4L2_MBUS_FMT_JPEG_1X8: + case MEDIA_BUS_FMT_JPEG_1X8: /* TODO */ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG; cfg->mipi_dt = MIPI_DT_RAW8;
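
The ttm_eu_reserve_buffers() change in drivers/gpu/drm/ttm/ttm_execbuf_util.c above adds a fourth parameter, dups: when the same buffer object appears twice on the validation list, the second reservation attempt under the shared ww_acquire_ctx fails with -EALREADY, and with a non-NULL dups list the duplicate entry is moved there instead of aborting the whole reservation. A minimal caller sketch under the new signature; reserve_with_dups() and the flow around it are illustrative, not taken from this series:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/*
 * Illustrative only: reserve all buffers on 'validate_list', letting
 * duplicate entries collect on a side list rather than failing the
 * whole reservation.
 */
static int reserve_with_dups(struct list_head *validate_list)
{
	struct ww_acquire_ctx ticket;
	struct list_head dups;
	int ret;

	INIT_LIST_HEAD(&dups);

	ret = ttm_eu_reserve_buffers(&ticket, validate_list, true, &dups);
	if (ret)
		return ret;

	/* ... validate and use the reserved buffers here ... */

	ttm_eu_backoff_reservation(&ticket, validate_list);

	/*
	 * Entries on 'dups' were reserved through their first occurrence
	 * on validate_list, so they are only merged back here, not backed
	 * off a second time.
	 */
	list_splice_tail_init(&dups, validate_list);
	return 0;
}

Passing NULL for dups, as the updated vmwgfx call sites above do, keeps the previous behaviour in which a duplicate entry fails the reservation.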