Diffstat (limited to 'drivers/gpu/drm/xe/xe_device.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_device.c | 332
1 file changed, 284 insertions, 48 deletions
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5ef9b50a20d0..1aba6f9eaa19 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -5,6 +5,7 @@
 
 #include "xe_device.h"
 
+#include <linux/delay.h>
 #include <linux/units.h>
 
 #include <drm/drm_aperture.h>
@@ -17,6 +18,7 @@
 #include <drm/xe_drm.h>
 
 #include "display/xe_display.h"
+#include "instructions/xe_gpu_commands.h"
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
 #include "xe_bo.h"
@@ -27,15 +29,20 @@
 #include "xe_drv.h"
 #include "xe_exec.h"
 #include "xe_exec_queue.h"
+#include "xe_force_wake.h"
 #include "xe_ggtt.h"
 #include "xe_gsc_proxy.h"
 #include "xe_gt.h"
 #include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_guc.h"
 #include "xe_hwmon.h"
 #include "xe_irq.h"
 #include "xe_memirq.h"
 #include "xe_mmio.h"
 #include "xe_module.h"
+#include "xe_observation.h"
 #include "xe_pat.h"
 #include "xe_pcode.h"
 #include "xe_pm.h"
@@ -45,7 +52,11 @@
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_ttm_sys_mgr.h"
 #include "xe_vm.h"
+#include "xe_vram.h"
 #include "xe_wait_user_fence.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
 
 static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -53,6 +64,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	struct xe_drm_client *client;
 	struct xe_file *xef;
 	int ret = -ENOMEM;
+	struct task_struct *task = NULL;
 
 	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
 	if (!xef)
@@ -79,9 +91,63 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&xe->clients.lock);
 
 	file->driver_priv = xef;
+	kref_init(&xef->refcount);
+
+	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
+	if (task) {
+		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
+		xef->pid = task->pid;
+		put_task_struct(task);
+	}
+
 	return 0;
 }
 
+static void xe_file_destroy(struct kref *ref)
+{
+	struct xe_file *xef = container_of(ref, struct xe_file, refcount);
+	struct xe_device *xe = xef->xe;
+
+	xa_destroy(&xef->exec_queue.xa);
+	mutex_destroy(&xef->exec_queue.lock);
+	xa_destroy(&xef->vm.xa);
+	mutex_destroy(&xef->vm.lock);
+
+	spin_lock(&xe->clients.lock);
+	xe->clients.count--;
+	spin_unlock(&xe->clients.lock);
+
+	xe_drm_client_put(xef->client);
+	kfree(xef->process_name);
+	kfree(xef);
+}
+
+/**
+ * xe_file_get() - Take a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Anyone with a pointer to xef must take a reference to the xe file
+ * object using this call.
+ *
+ * Return: xe file pointer
+ */
+struct xe_file *xe_file_get(struct xe_file *xef)
+{
+	kref_get(&xef->refcount);
+	return xef;
+}
+
+/**
+ * xe_file_put() - Drop a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Used to drop reference to the xef object
+ */
+void xe_file_put(struct xe_file *xef)
+{
+	kref_put(&xef->refcount, xe_file_destroy);
+}
+
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
@@ -90,27 +156,26 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 	struct xe_exec_queue *q;
 	unsigned long idx;
 
-	mutex_lock(&xef->exec_queue.lock);
+	xe_pm_runtime_get(xe);
+
+	/*
+	 * No need for exec_queue.lock here as there is no contention for it
+	 * when FD is closing as IOCTLs presumably can't be modifying the
+	 * xarray. Taking exec_queue.lock here causes undue dependency on
+	 * vm->lock taken during xe_exec_queue_kill().
+	 */
 	xa_for_each(&xef->exec_queue.xa, idx, q) {
 		xe_exec_queue_kill(q);
 		xe_exec_queue_put(q);
 	}
-	mutex_unlock(&xef->exec_queue.lock);
-	xa_destroy(&xef->exec_queue.xa);
-	mutex_destroy(&xef->exec_queue.lock);
 
 	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
 		xe_vm_close_and_put(vm);
 	mutex_unlock(&xef->vm.lock);
-	xa_destroy(&xef->vm.xa);
-	mutex_destroy(&xef->vm.lock);
 
-	spin_lock(&xe->clients.lock);
-	xe->clients.count--;
-	spin_unlock(&xe->clients.lock);
+	xe_file_put(xef);
 
-	xe_drm_client_put(xef->client);
-	kfree(xef);
+	xe_pm_runtime_put(xe);
 }
 
 static const struct drm_ioctl_desc xe_ioctls[] = {
@@ -130,6 +195,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
 			  DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
 };
 
 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -138,6 +204,9 @@
 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
 	long ret;
 
+	if (xe_device_wedged(xe))
+		return -ECANCELED;
+
 	ret = xe_pm_runtime_get_ioctl(xe);
 	if (ret >= 0)
 		ret = drm_ioctl(file, cmd, arg);
@@ -153,6 +222,9 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
 	long ret;
 
+	if (xe_device_wedged(xe))
+		return -ECANCELED;
+
 	ret = xe_pm_runtime_get_ioctl(xe);
 	if (ret >= 0)
 		ret = drm_compat_ioctl(file, cmd, arg);
@@ -180,13 +252,6 @@ static const struct file_operations xe_driver_fops = {
 #endif
 };
 
-static void xe_driver_release(struct drm_device *dev)
-{
-	struct xe_device *xe = to_xe_device(dev);
-
-	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
-}
-
 static struct drm_driver driver = {
 	/* Don't use MTRRs here; the Xserver or userspace app should
 	 * deal with them for Intel hardware.
@@ -205,8 +270,6 @@ static struct drm_driver driver = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo = xe_drm_client_fdinfo,
 #endif
-	.release = &xe_driver_release,
-
 	.ioctls = xe_ioctls,
 	.num_ioctls = ARRAY_SIZE(xe_ioctls),
 	.fops = &xe_driver_fops,
@@ -269,7 +332,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 
 	init_waitqueue_head(&xe->ufence_wq);
 
-	drmm_mutex_init(&xe->drm, &xe->usm.lock);
+	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
+	if (err)
+		goto err;
+
 	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
 
 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
@@ -378,7 +444,7 @@ static void xe_driver_flr(struct xe_device *xe)
 	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
 }
 
-static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
+static void xe_driver_flr_fini(void *arg)
 {
 	struct xe_device *xe = arg;
 
@@ -386,7 +452,7 @@ static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
 		xe_driver_flr(xe);
 }
 
-static void xe_device_sanitize(struct drm_device *drm, void *arg)
+static void xe_device_sanitize(void *arg)
 {
 	struct xe_device *xe = arg;
 	struct xe_gt *gt;
@@ -473,6 +539,17 @@ static int wait_for_lmem_ready(struct xe_device *xe)
 	return 0;
 }
 
+static void update_device_info(struct xe_device *xe)
+{
+	/* disable features that are not available/applicable to VFs */
+	if (IS_SRIOV_VF(xe)) {
+		xe->info.enable_display = 0;
+		xe->info.has_heci_gscfi = 0;
+		xe->info.skip_guc_pc = 1;
+		xe->info.skip_pcode = 1;
+	}
+}
+
 /**
  * xe_device_probe_early: Device early probe
  * @xe: xe device instance
@@ -493,6 +570,8 @@ int xe_device_probe_early(struct xe_device *xe)
 
 	xe_sriov_probe_early(xe);
 
+	update_device_info(xe);
+
 	err = xe_pcode_probe_early(xe);
 	if (err)
 		return err;
@@ -501,6 +580,8 @@
 	if (err)
 		return err;
 
+	xe->wedged.mode = xe_modparam.wedged_mode;
+
 	return 0;
 }
 
@@ -551,14 +632,28 @@ int xe_device_probe(struct xe_device *xe)
 	if (err)
 		return err;
 
-	xe_mmio_probe_tiles(xe);
+	err = xe_mmio_probe_tiles(xe);
+	if (err)
+		return err;
 
 	xe_ttm_sys_mgr_init(xe);
 
-	for_each_gt(gt, xe, id)
-		xe_force_wake_init_gt(gt, gt_to_fw(gt));
+	for_each_gt(gt, xe, id) {
+		err = xe_gt_init_early(gt);
+		if (err)
+			return err;
+	}
 
 	for_each_tile(tile, xe, id) {
+		if (IS_SRIOV_VF(xe)) {
+			xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
+			err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
+			if (err)
+				return err;
+			err = xe_gt_sriov_vf_query_config(tile->primary_gt);
+			if (err)
+				return err;
+		}
 		err = xe_ggtt_init_early(tile->mem.ggtt);
 		if (err)
 			return err;
@@ -578,13 +673,10 @@
 	err = xe_devcoredump_init(xe);
 	if (err)
 		return err;
-	err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
+	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
 	if (err)
 		return err;
 
-	for_each_gt(gt, xe, id)
-		xe_pcode_init(gt);
-
 	err = xe_display_init_noirq(xe);
 	if (err)
 		return err;
@@ -593,24 +685,18 @@
 	if (err)
 		goto err;
 
-	for_each_gt(gt, xe, id) {
-		err = xe_gt_init_early(gt);
-		if (err)
-			goto err_irq_shutdown;
-	}
-
 	err = xe_device_set_has_flat_ccs(xe);
 	if (err)
-		goto err_irq_shutdown;
+		goto err;
 
-	err = xe_mmio_probe_vram(xe);
+	err = xe_vram_probe(xe);
 	if (err)
-		goto err_irq_shutdown;
+		goto err;
 
 	for_each_tile(tile, xe, id) {
 		err = xe_tile_init_noalloc(tile);
 		if (err)
-			goto err_irq_shutdown;
+			goto err;
 	}
 
 	/* Allocate and map stolen after potential VRAM resize */
@@ -624,7 +710,7 @@
 	 */
 	err = xe_display_init_noaccel(xe);
 	if (err)
-		goto err_irq_shutdown;
+		goto err;
 
 	for_each_gt(gt, xe, id) {
 		last_gt = id;
@@ -636,25 +722,37 @@
 
 	xe_heci_gsc_init(xe);
 
-	err = xe_display_init(xe);
+	err = xe_oa_init(xe);
 	if (err)
 		goto err_fini_gt;
 
+	err = xe_display_init(xe);
+	if (err)
+		goto err_fini_oa;
+
 	err = drm_dev_register(&xe->drm, 0);
 	if (err)
 		goto err_fini_display;
 
 	xe_display_register(xe);
+	xe_oa_register(xe);
+
 	xe_debugfs_register(xe);
 
 	xe_hwmon_register(xe);
 
-	return drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
+	for_each_gt(gt, xe, id)
+		xe_gt_sanitize_freq(gt);
+
+	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
 
 err_fini_display:
 	xe_display_driver_remove(xe);
 
+err_fini_oa:
+	xe_oa_fini(xe);
+
 err_fini_gt:
 	for_each_gt(gt, xe, id) {
 		if (id < last_gt)
@@ -663,8 +761,6 @@ err_fini_gt:
 			break;
 	}
 
-err_irq_shutdown:
-	xe_irq_shutdown(xe);
 err:
 	xe_display_fini(xe);
 	return err;
@@ -683,29 +779,118 @@
 	struct xe_gt *gt;
 	u8 id;
 
+	xe_oa_unregister(xe);
+
 	xe_device_remove_display(xe);
 
 	xe_display_fini(xe);
 
+	xe_oa_fini(xe);
+
 	xe_heci_gsc_fini(xe);
 
 	for_each_gt(gt, xe, id)
 		xe_gt_remove(gt);
-
-	xe_irq_shutdown(xe);
 }
 
 void xe_device_shutdown(struct xe_device *xe)
 {
 }
 
+/**
+ * xe_device_wmb() - Device specific write memory barrier
+ * @xe: the &xe_device
+ *
+ * While wmb() is sufficient for a barrier if we use system memory, on discrete
+ * platforms with device memory we additionally need to issue a register write.
+ * Since it doesn't matter which register we write to, use the read-only VF_CAP
+ * register that is also marked as accessible by the VFs.
+ */
 void xe_device_wmb(struct xe_device *xe)
 {
 	struct xe_gt *gt = xe_root_mmio_gt(xe);
 
 	wmb();
 	if (IS_DGFX(xe))
-		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+		xe_mmio_write32(gt, VF_CAP_REG, 0);
+}
+
+/**
+ * xe_device_td_flush() - Flush transient L3 cache entries
+ * @xe: The device
+ *
+ * Display engine has direct access to memory and is never coherent with L3/L4
+ * caches (or CPU caches), however KMD is responsible for specifically flushing
+ * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
+ * can happen from such a surface without seeing corruption.
+ *
+ * Display surfaces can be tagged as transient by mapping them using one of the
+ * various L3:XD PAT index modes on Xe2.
+ *
+ * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
+ * at the end of each submission via PIPE_CONTROL for compute/render, since SA
+ * Media is not coherent with L3 and we want to support render-vs-media
+ * use cases. For other engines like copy/blt the HW internally forces uncached
+ * behaviour, hence why we can skip the TDF on such platforms.
+ */
+void xe_device_td_flush(struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	u8 id;
+
+	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
+		return;
+
+	if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
+		xe_device_l2_flush(xe);
+		return;
+	}
+
+	for_each_gt(gt, xe, id) {
+		if (xe_gt_is_media_type(gt))
+			continue;
+
+		if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+			return;
+
+		xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+		/*
+		 * FIXME: We can likely do better here with our choice of
+		 * timeout. Currently we just assume the worst case, i.e. 150us,
+		 * which is believed to be sufficient to cover the worst case
+		 * scenario on current platforms if all cache entries are
+		 * transient and need to be flushed.
+		 */
+		if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+				   150, NULL, false))
+			xe_gt_err_once(gt, "TD flush timeout\n");
+
+		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+	}
+}
+
+void xe_device_l2_flush(struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	int err;
+
+	gt = xe_root_mmio_gt(xe);
+
+	if (!XE_WA(gt, 16023588340))
+		return;
+
+	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (err)
+		return;
+
+	spin_lock(&gt->global_invl_lock);
+	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+		xe_gt_err_once(gt, "Global invalidation timeout\n");
+	spin_unlock(&gt->global_invl_lock);
+
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 }
 
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
@@ -759,3 +944,54 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
 {
 	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
 }
+
+static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
+{
+	struct xe_device *xe = arg;
+
+	xe_pm_runtime_put(xe);
+}
+
+/**
+ * xe_device_declare_wedged - Declare device wedged
+ * @xe: xe device instance
+ *
+ * This is a final state that can only be cleared with a module
+ * re-probe (unbind + bind).
+ * In this state every IOCTL will be blocked so the GT cannot be used.
+ * In general it will be called upon any critical error such as gt reset
+ * failure or guc loading failure.
+ * If xe.wedged module parameter is set to 2, this function will be called
+ * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
+ * snapshot capture. In this mode, GT reset won't be attempted so the state of
+ * the issue is preserved for further debugging.
+ */
+void xe_device_declare_wedged(struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	u8 id;
+
+	if (xe->wedged.mode == 0) {
+		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
+		return;
+	}
+
+	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
+		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
+		return;
+	}
+
+	xe_pm_runtime_get_noresume(xe);
+
+	if (!atomic_xchg(&xe->wedged.flag, 1)) {
+		xe->needs_flr_on_fini = true;
+		drm_err(&xe->drm,
+			"CRITICAL: Xe has declared device %s as wedged.\n"
+			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
+			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
+			dev_name(xe->drm.dev));
+	}
+
+	for_each_gt(gt, xe, id)
+		xe_gt_declare_wedged(gt);
+}