From 86ceaaaec59707b06216a15b3852867fa2f1574e Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 28 Nov 2023 08:25:05 -0800 Subject: drm/i915/gem: Atomically invalidate userptr on mmu-notifier Never block for outstanding work on userptr object upon receipt of a mmu-notifier. The reason we originally did so was to immediately unbind the userptr and unpin its pages, but since that has been dropped in commit b4b9731b02c3c ("drm/i915: Simplify userptr locking"), we never return the pages to the system i.e. never drop our page->mapcount and so do not allow the page and CPU PTE to be revoked. Based on this history, we know we are safe to drop the wait entirely. Upon return from mmu-notifier, we will still have the userptr pages pinned preventing the following PTE operation (such as try_to_unmap) adjusting the vm_area_struct, so it is safe to keep the pages around for as long as we still have i/o pending. We do not have any means currently to asynchronously revalidate the userptr pages, that is always prior to next use. Signed-off-by: Chris Wilson Signed-off-by: Jonathan Cavitt Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20231128162505.3493942-1-jonathan.cavitt@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 8 ----- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 42 -------------------------- drivers/gpu/drm/i915/gem/i915_gem_userptr.h | 14 --------- drivers/gpu/drm/i915/i915_drv.h | 8 ----- drivers/gpu/drm/i915/i915_gem.c | 5 --- 5 files changed, 77 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gem/i915_gem_userptr.h (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 12cb2ef8dbd6..8968a09f3e44 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2159,12 +2159,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) #ifdef CONFIG_MMU_NOTIFIER if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) { - read_lock(&eb->i915->mm.notifier_lock); - - /* - * count is always at least 1, otherwise __EXEC_USERPTR_USED - * could not have been set - */ for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; struct drm_i915_gem_object *obj = ev->vma->obj; @@ -2176,8 +2170,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) if (err) break; } - - read_unlock(&eb->i915->mm.notifier_lock); } #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 1d3ebdf4069b..0e21ce9d3e5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -42,7 +42,6 @@ #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -#include "i915_gem_userptr.h" #include "i915_scatterlist.h" #ifdef CONFIG_MMU_NOTIFIER @@ -61,36 +60,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { - struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - long r; - - if (!mmu_notifier_range_blockable(range)) - return false; - - write_lock(&i915->mm.notifier_lock); - mmu_interval_set_seq(mni, cur_seq); - - write_unlock(&i915->mm.notifier_lock); - - /* - * We don't wait when the process is exiting. This is valid - * because the object will be cleaned up anyway. 
- * - * This is also temporarily required as a hack, because we - * cannot currently force non-consistent batch buffers to preempt - * and reschedule by waiting on it, hanging processes on exit. - */ - if (current->flags & PF_EXITING) - return true; - - /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, - MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); - return true; } @@ -580,15 +550,3 @@ i915_gem_userptr_ioctl(struct drm_device *dev, #endif } -int i915_gem_init_userptr(struct drm_i915_private *dev_priv) -{ -#ifdef CONFIG_MMU_NOTIFIER - rwlock_init(&dev_priv->mm.notifier_lock); -#endif - - return 0; -} - -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) -{ -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h deleted file mode 100644 index 8dadb2f8436d..000000000000 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2021 Intel Corporation - */ - -#ifndef __I915_GEM_USERPTR_H__ -#define __I915_GEM_USERPTR_H__ - -struct drm_i915_private; - -int i915_gem_init_userptr(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); - -#endif /* __I915_GEM_USERPTR_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a2a78c61f21..a72cecd2dbc7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -165,14 +165,6 @@ struct i915_gem_mm { struct notifier_block vmap_notifier; struct shrinker shrinker; -#ifdef CONFIG_MMU_NOTIFIER - /** - * notifier_lock for mmu notifiers, memory may not be allocated - * while holding this lock. - */ - rwlock_t notifier_lock; -#endif - /* shrinker accounting, also useful for userland debugging */ u64 shrink_memory; u32 shrink_count; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c166ad5e187a..02e10451867a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -48,7 +48,6 @@ #include "gem/i915_gem_object_frontbuffer.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gem_region.h" -#include "gem/i915_gem_userptr.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -1165,10 +1164,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; - ret = i915_gem_init_userptr(dev_priv); - if (ret) - return ret; - for_each_gt(gt, dev_priv, i) { intel_uc_fetch_firmwares(>->uc); intel_wopcm_init(>->wopcm); -- cgit v1.2.3 From 0c68132df6e66244acec1bb5b9e19b0751414389 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Mon, 18 Dec 2023 16:05:43 -0800 Subject: drm/i915/perf: Update handling of MMIO triggered reports On XEHP platforms user is not able to find MMIO triggered reports in the OA buffer since i915 squashes the context ID fields. These context ID fields hold the MMIO trigger markers. Update logic to not squash the context ID fields of MMIO triggered reports. 
Fixes: 7eeaedf79989 ("drm/i915/perf: Determine context valid in OA reports") Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20231219000543.1087706-1-umesh.nerlige.ramappa@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f92c2a464ebe..dbbc95d18550 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -772,10 +772,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * The reason field includes flags identifying what * triggered this specific report (mostly timer * triggered or e.g. due to a context switch). - * - * In MMIO triggered reports, some platforms do not set the - * reason bit in this field and it is valid to have a reason - * field of zero. */ reason = oa_report_reason(stream, report); ctx_id = oa_context_id(stream, report32); @@ -787,8 +783,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * * Note: that we don't clear the valid_ctx_bit so userspace can * understand that the ID has been squashed by the kernel. + * + * Update: + * + * On XEHP platforms the behavior of context id valid bit has + * changed compared to prior platforms. To describe this, we + * define a few terms: + * + * context-switch-report: This is a report with the reason type + * being context-switch. It is generated when a context switches + * out. + * + * context-valid-bit: A bit that is set in the report ID field + * to indicate that a valid context has been loaded. + * + * gpu-idle: A condition characterized by a + * context-switch-report with context-valid-bit set to 0. + * + * On prior platforms, context-id-valid bit is set to 0 only + * when GPU goes idle. In all other reports, it is set to 1. + * + * On XEHP platforms, context-valid-bit is set to 1 in a context + * switch report if a new context switched in. For all other + * reports it is set to 0. + * + * This change in behavior causes an issue with MMIO triggered + * reports. MMIO triggered reports have the markers in the + * context ID field and the context-valid-bit is 0. The logic + * below to squash the context ID would render the report + * useless since the user will not be able to find it in the OA + * buffer. Since MMIO triggered reports exist only on XEHP, + * we should avoid squashing these for XEHP platforms. */ - if (oa_report_ctx_invalid(stream, report)) { + + if (oa_report_ctx_invalid(stream, report) && + GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { ctx_id = INVALID_CTX_ID; oa_context_id_squash(stream, report32); } -- cgit v1.2.3 From 7353c3d7c15017140a8b984e41f94be0bf535e73 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Dec 2023 11:54:29 -0800 Subject: drm/i915/gem: reconcile Excess struct member kernel-doc warnings Document nested struct members with full names as described in Documentation/doc-guide/kernel-doc.rst. 
i915_gem_context_types.h:420: warning: Excess struct member 'lock' description in 'i915_gem_context' Signed-off-by: Randy Dunlap Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: Jonathan Corbet Cc: dri-devel@lists.freedesktop.org Reviewed-by: Rodrigo Vivi Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20231226195432.10891-1-rdunlap@infradead.org --- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index c573c067779f..03bc7f9d191b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -412,9 +412,9 @@ struct i915_gem_context { /** @stale: tracks stale engines to be destroyed */ struct { - /** @lock: guards engines */ + /** @stale.lock: guards engines */ spinlock_t lock; - /** @engines: list of stale engines */ + /** @stale.engines: list of stale engines */ struct list_head engines; } stale; }; -- cgit v1.2.3 From cd1d91115ff1929ec346d85f512ef260ddf8131e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 28 Dec 2023 15:49:46 -0800 Subject: drm/i915/gt: reconcile Excess struct member kernel-doc warnings Document nested struct members with full names as described in Documentation/doc-guide/kernel-doc.rst. intel_gsc.h:34: warning: Excess struct member 'gem_obj' description in 'intel_gsc' Also add missing field member descriptions. Signed-off-by: Randy Dunlap Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: Jonathan Corbet Cc: dri-devel@lists.freedesktop.org Reviewed-by: Rodrigo Vivi Cc: Andi Shyti Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20231228234946.12405-1-rdunlap@infradead.org --- drivers/gpu/drm/i915/gt/intel_gsc.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h index 7ab3ca0f9f26..013c64251448 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.h +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -21,8 +21,11 @@ struct mei_aux_device; /** * struct intel_gsc - graphics security controller * - * @gem_obj: scratch memory GSC operations - * @intf : gsc interface + * @intf: gsc interface + * @intf.adev: MEI aux. device for this @intf + * @intf.gem_obj: scratch memory GSC operations + * @intf.irq: IRQ for this device (%-1 for no IRQ) + * @intf.id: this interface's id number/index */ struct intel_gsc { struct intel_gsc_intf { -- cgit v1.2.3 From e4cf1a70fad3e2107503e99cfe9cc0c9cba19dad Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Dec 2023 11:54:31 -0800 Subject: drm/i915/guc: reconcile Excess struct member kernel-doc warnings Document nested struct members with full names as described in Documentation/doc-guide/kernel-doc.rst. 
intel_guc.h:305: warning: Excess struct member 'lock' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'guc_ids' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'num_guc_ids' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'guc_ids_bitmap' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'guc_id_list' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'guc_ids_in_use' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'destroyed_contexts' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'destroyed_worker' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'reset_fail_worker' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'reset_fail_mask' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'sched_disable_delay_ms' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'sched_disable_gucid_threshold' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'lock' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'gt_stamp' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'ping_delay' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'work' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'shift' description in 'intel_guc' intel_guc.h:305: warning: Excess struct member 'last_stat_jiffies' description in 'intel_guc' 18 warnings as Errors Signed-off-by: Randy Dunlap Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: Jonathan Corbet Cc: dri-devel@lists.freedesktop.org Reviewed-by: Rodrigo Vivi Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20231226195432.10891-3-rdunlap@infradead.org --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 75 +++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 33 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index e22c12ce245a..813cc888e6fa 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -105,61 +105,67 @@ struct intel_guc { */ struct { /** - * @lock: protects everything in submission_state, - * ce->guc_id.id, and ce->guc_id.ref when transitioning in and - * out of zero + * @submission_state.lock: protects everything in + * submission_state, ce->guc_id.id, and ce->guc_id.ref + * when transitioning in and out of zero */ spinlock_t lock; /** - * @guc_ids: used to allocate new guc_ids, single-lrc + * @submission_state.guc_ids: used to allocate new + * guc_ids, single-lrc */ struct ida guc_ids; /** - * @num_guc_ids: Number of guc_ids, selftest feature to be able - * to reduce this number while testing. + * @submission_state.num_guc_ids: Number of guc_ids, selftest + * feature to be able to reduce this number while testing. 
*/ int num_guc_ids; /** - * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc + * @submission_state.guc_ids_bitmap: used to allocate + * new guc_ids, multi-lrc */ unsigned long *guc_ids_bitmap; /** - * @guc_id_list: list of intel_context with valid guc_ids but no - * refs + * @submission_state.guc_id_list: list of intel_context + * with valid guc_ids but no refs */ struct list_head guc_id_list; /** - * @guc_ids_in_use: Number single-lrc guc_ids in use + * @submission_state.guc_ids_in_use: Number single-lrc + * guc_ids in use */ unsigned int guc_ids_in_use; /** - * @destroyed_contexts: list of contexts waiting to be destroyed - * (deregistered with the GuC) + * @submission_state.destroyed_contexts: list of contexts + * waiting to be destroyed (deregistered with the GuC) */ struct list_head destroyed_contexts; /** - * @destroyed_worker: worker to deregister contexts, need as we - * need to take a GT PM reference and can't from destroy - * function as it might be in an atomic context (no sleeping) + * @submission_state.destroyed_worker: worker to deregister + * contexts, need as we need to take a GT PM reference and + * can't from destroy function as it might be in an atomic + * context (no sleeping) */ struct work_struct destroyed_worker; /** - * @reset_fail_worker: worker to trigger a GT reset after an - * engine reset fails + * @submission_state.reset_fail_worker: worker to trigger + * a GT reset after an engine reset fails */ struct work_struct reset_fail_worker; /** - * @reset_fail_mask: mask of engines that failed to reset + * @submission_state.reset_fail_mask: mask of engines that + * failed to reset */ intel_engine_mask_t reset_fail_mask; /** - * @sched_disable_delay_ms: schedule disable delay, in ms, for - * contexts + * @submission_state.sched_disable_delay_ms: schedule + * disable delay, in ms, for contexts */ unsigned int sched_disable_delay_ms; /** - * @sched_disable_gucid_threshold: threshold of min remaining available - * guc_ids before we start bypassing the schedule disable delay + * @submission_state.sched_disable_gucid_threshold: + * threshold of min remaining available guc_ids before + * we start bypassing the schedule disable delay */ unsigned int sched_disable_gucid_threshold; } submission_state; @@ -243,37 +249,40 @@ struct intel_guc { */ struct { /** - * @lock: Lock protecting the below fields and the engine stats. + * @timestamp.lock: Lock protecting the below fields and + * the engine stats. */ spinlock_t lock; /** - * @gt_stamp: 64 bit extended value of the GT timestamp. + * @timestamp.gt_stamp: 64-bit extended value of the GT + * timestamp. */ u64 gt_stamp; /** - * @ping_delay: Period for polling the GT timestamp for - * overflow. + * @timestamp.ping_delay: Period for polling the GT + * timestamp for overflow. */ unsigned long ping_delay; /** - * @work: Periodic work to adjust GT timestamp, engine and - * context usage for overflows. + * @timestamp.work: Periodic work to adjust GT timestamp, + * engine and context usage for overflows. */ struct delayed_work work; /** - * @shift: Right shift value for the gpm timestamp + * @timestamp.shift: Right shift value for the gpm timestamp */ u32 shift; /** - * @last_stat_jiffies: jiffies at last actual stats collection time - * We use this timestamp to ensure we don't oversample the - * stats because runtime power management events can trigger - * stats collection at much higher rates than required. + * @timestamp.last_stat_jiffies: jiffies at last actual + * stats collection time. 
We use this timestamp to ensure + * we don't oversample the stats because runtime power + * management events can trigger stats collection at much + * higher rates than required. */ unsigned long last_stat_jiffies; } timestamp; -- cgit v1.2.3 From aa253baca534357e033bd29b074ce1eade2a9362 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Dec 2023 11:54:32 -0800 Subject: drm/i915/perf: reconcile Excess struct member kernel-doc warnings Document nested struct members with full names as described in Documentation/doc-guide/kernel-doc.rst. i915_perf_types.h:341: warning: Excess struct member 'ptr_lock' description in 'i915_perf_stream' i915_perf_types.h:341: warning: Excess struct member 'head' description in 'i915_perf_stream' i915_perf_types.h:341: warning: Excess struct member 'tail' description in 'i915_perf_stream' 3 warnings as Errors Signed-off-by: Randy Dunlap Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: Jonathan Corbet Cc: dri-devel@lists.freedesktop.org Reviewed-by: Rodrigo Vivi Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20231226195432.10891-4-rdunlap@infradead.org --- drivers/gpu/drm/i915/i915_perf_types.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 13b1ae9b96c7..46445248d193 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -291,7 +291,8 @@ struct i915_perf_stream { int size_exponent; /** - * @ptr_lock: Locks reads and writes to all head/tail state + * @oa_buffer.ptr_lock: Locks reads and writes to all + * head/tail state * * Consider: the head and tail pointer state needs to be read * consistently from a hrtimer callback (atomic context) and @@ -313,7 +314,8 @@ struct i915_perf_stream { spinlock_t ptr_lock; /** - * @head: Although we can always read back the head pointer register, + * @oa_buffer.head: Although we can always read back + * the head pointer register, * we prefer to avoid trusting the HW state, just to avoid any * risk that some hardware condition could * somehow bump the * head pointer unpredictably and cause us to forward the wrong @@ -322,7 +324,8 @@ struct i915_perf_stream { u32 head; /** - * @tail: The last verified tail that can be read by userspace. + * @oa_buffer.tail: The last verified tail that can be + * read by userspace. */ u32 tail; } oa_buffer; -- cgit v1.2.3 From 835e4d9bb3a13879031942ca6692d5a82ec00158 Mon Sep 17 00:00:00 2001 From: Shuicheng Lin Date: Tue, 2 Jan 2024 01:02:31 +0000 Subject: drm/i915/guc: Change wa and EU_PERF_CNTL registers to MCR type Some of the wa registers are MCR register, and EU_PERF_CNTL registers are MCR register. MCR register needs extra process for read/write. As normal MMIO register also could work with the MCR register process, change all wa registers to MCR type for code simplicity. 
Signed-off-by: Shuicheng Lin Cc: Matt Roper Cc: Umesh Nerlige Ramappa Reviewed-by: Matt Roper Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240102010231.843778-1-shuicheng.lin@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 63724e17829a..f7372f736a77 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -377,8 +377,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset, CCS_MASK(engine->gt)) ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true); + /* + * some of the WA registers are MCR registers. As it is safe to + * use MCR form for non-MCR registers, for code simplicity, all + * WA registers are added with MCR form. + */ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg); + ret |= GUC_MCR_REG_ADD(gt, regset, wa->mcr_reg, wa->masked_reg); /* Be extra paranoid and include all whitelist registers. */ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) @@ -394,13 +399,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset, ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false); if (GRAPHICS_VER(engine->i915) >= 12) { - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL0)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL1)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL2)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL3)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL4)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL5)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL6)), false); } return ret ? -1 : 0; -- cgit v1.2.3 From de06b42edc5bf05aefbb7e2f59475d6022ed57e1 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 4 Jan 2024 18:46:00 +0200 Subject: drm/i915: don't make assumptions about intel_wakeref_t type intel_wakeref_t is supposed to be a mostly opaque cookie to its users. It should only be checked for being non-zero and set to zero. Debug logging its actual value is meaningless. Switch to just debug logging whether the async_put_wakeref is non-zero. The issue dates back to much earlier than commit b49e894c3fd8 ("drm/i915: Replace custom intel runtime_pm tracker with ref_tracker library"), but this is the one that brought about a build failure due to the printf format. 
Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/r/20240102111222.2db11208@canb.auug.org.au Fixes: b49e894c3fd8 ("drm/i915: Replace custom intel runtime_pm tracker with ref_tracker library") Cc: Andrzej Hajda Cc: Imre Deak Signed-off-by: Jani Nikula Reviewed-by: Imre Deak Reviewed-by: Andrzej Hajda Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20240104164600.783371-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f78bfcf2ce00..42da2af80976 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains) struct drm_i915_private, display.power.domains); - drm_dbg(&i915->drm, "async_put_wakeref %lu\n", - power_domains->async_put_wakeref); + drm_dbg(&i915->drm, "async_put_wakeref: %s\n", + str_yes_no(power_domains->async_put_wakeref)); print_power_domains(power_domains, "async_put_domains[0]", &power_domains->async_put_domains[0]); -- cgit v1.2.3 From c5b32a41946139b9f4f7a087fda2355a90f671cb Mon Sep 17 00:00:00 2001 From: Tejas Upadhyay Date: Wed, 3 Jan 2024 11:01:11 +0530 Subject: drm/i915/xelpg: Add workaround 14019877138 WA 14019877138 needed for Graphics 12.70/71 both V2(Jani): - Use drm/i915 Signed-off-by: Tejas Upadhyay Reviewed-by: Matt Roper Reviewed-by: Andi Shyti Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240103053111.763172-1-tejas.upadhyay@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 3eacbc50caf8..270b56fc85e2 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -820,6 +820,9 @@ static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_18019271663 */ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE); + + /* Wa_14019877138 */ + wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT); } static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine, -- cgit v1.2.3 From a797099562267ebb281acd59750f1a8dbba36eef Mon Sep 17 00:00:00 2001 From: John Harrison Date: Tue, 2 Jan 2024 14:22:02 -0800 Subject: drm/i915/huc: Allow for very slow HuC loading A failure to load the HuC is occasionally observed where the cause is believed to be a low GT frequency leading to very long load times. So a) increase the timeout so that the user still gets a working system even in the case of slow load. And b) report the frequency during the load to see if that is the cause of the slow down. Also update the similar code on the GuC load to not use uncore->gt when there is a local gt available. The two should match, but no need for unnecessary de-referencing. 
Signed-off-by: John Harrison Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20240102222202.310495-1-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 10 ++--- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 64 ++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 11 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 0f79cb658518..52332bb14339 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -184,7 +184,7 @@ static int guc_wait_ucode(struct intel_guc *guc) * in the seconds range. However, there is a limit on how long an * individual wait_for() can wait. So wrap it in a loop. */ - before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps); + before_freq = intel_rps_read_actual_frequency(>->rps); before = ktime_get(); for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) { ret = wait_for(guc_load_done(uncore, &status, &success), 1000); @@ -192,7 +192,7 @@ static int guc_wait_ucode(struct intel_guc *guc) break; guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n", - count, intel_rps_read_actual_frequency(&uncore->gt->rps), status, + count, intel_rps_read_actual_frequency(>->rps), status, REG_FIELD_GET(GS_BOOTROM_MASK, status), REG_FIELD_GET(GS_UKERNEL_MASK, status)); } @@ -204,7 +204,7 @@ static int guc_wait_ucode(struct intel_guc *guc) u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status); guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n", - status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret); + status, delta_ms, intel_rps_read_actual_frequency(>->rps), ret); guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", REG_FIELD_GET(GS_MIA_IN_RESET, status), bootrom, ukernel, @@ -254,11 +254,11 @@ static int guc_wait_ucode(struct intel_guc *guc) guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n", delta_ms, status, count, ret); guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n", - intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq, + intel_rps_read_actual_frequency(>->rps), before_freq, intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt))); } else { guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n", - delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), + delta_ms, intel_rps_read_actual_frequency(>->rps), before_freq, status, count, ret); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ba9e07fc2b57..0945b177d5f9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -6,6 +6,7 @@ #include #include "gt/intel_gt.h" +#include "gt/intel_rps.h" #include "intel_guc_reg.h" #include "intel_huc.h" #include "intel_huc_print.h" @@ -447,17 +448,68 @@ static const char *auth_mode_string(struct intel_huc *huc, return partial ? "clear media" : "all workloads"; } +/* + * Use a longer timeout for debug builds so that problems can be detected + * and analysed. But a shorter timeout for releases so that user's don't + * wait forever to find out there is a problem. Note that the only reason + * an end user should hit the timeout is in case of extreme thermal throttling. 
+ * And a system that is that hot during boot is probably dead anyway! + */ +#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#define HUC_LOAD_RETRY_LIMIT 20 +#else +#define HUC_LOAD_RETRY_LIMIT 3 +#endif + int intel_huc_wait_for_auth_complete(struct intel_huc *huc, enum intel_huc_authentication_type type) { struct intel_gt *gt = huc_to_gt(huc); - int ret; + struct intel_uncore *uncore = gt->uncore; + ktime_t before, after, delta; + int ret, count; + u64 delta_ms; + u32 before_freq; - ret = __intel_wait_for_register(gt->uncore, - huc->status[type].reg, - huc->status[type].mask, - huc->status[type].value, - 2, 50, NULL); + /* + * The KMD requests maximum frequency during driver load, however thermal + * throttling can force the frequency down to minimum (although the board + * really should never get that hot in real life!). IFWI issues have been + * seen to cause sporadic failures to grant the higher frequency. And at + * minimum frequency, the authentication time can be in the seconds range. + * Note that there is a limit on how long an individual wait_for() can wait. + * So wrap it in a loop. + */ + before_freq = intel_rps_read_actual_frequency(>->rps); + before = ktime_get(); + for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) { + ret = __intel_wait_for_register(gt->uncore, + huc->status[type].reg, + huc->status[type].mask, + huc->status[type].value, + 2, 1000, NULL); + if (!ret) + break; + + huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n", + count, intel_rps_read_actual_frequency(>->rps), + huc->status[type].reg.reg); + } + after = ktime_get(); + delta = ktime_sub(after, before); + delta_ms = ktime_to_ms(delta); + + if (delta_ms > 50) { + huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n", + delta_ms, huc->status[type].reg.reg, count, ret); + huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n", + intel_rps_read_actual_frequency(>->rps), before_freq, + intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt))); + } else { + huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n", + delta_ms, intel_rps_read_actual_frequency(>->rps), + before_freq, huc->status[type].reg.reg, count, ret); + } /* mark the load process as complete even if the wait failed */ delayed_huc_load_complete(huc); -- cgit v1.2.3 From 5e83c060e95bea2cf76d01fa554cd31a1727e19a Mon Sep 17 00:00:00 2001 From: Alan Previn Date: Wed, 27 Dec 2023 20:55:57 -0800 Subject: drm/i915/guc: Flush context destruction worker at suspend When suspending, flush the context-guc-id deregistration worker at the final stages of intel_gt_suspend_late when we finally call gt_sanitize that eventually leads down to __uc_sanitize so that the deregistration worker doesn't fire off later as we reset the GuC microcontroller. 
Signed-off-by: Alan Previn Reviewed-by: Rodrigo Vivi Tested-by: Mousumi Jana Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20231228045558.536585-2-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 5 +++++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 2 ++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 ++ 3 files changed, 9 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index a259f1118c5a..9c64ae0766cc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1613,6 +1613,11 @@ static void guc_flush_submissions(struct intel_guc *guc) spin_unlock_irqrestore(&sched_engine->lock, flags); } +void intel_guc_submission_flush_work(struct intel_guc *guc) +{ + flush_work(&guc->submission_state.destroyed_worker); +} + static void guc_flush_destroyed_contexts(struct intel_guc *guc); void intel_guc_submission_reset_prepare(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index c57b29cdb1a6..b6df75622d3b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -38,6 +38,8 @@ int intel_guc_wait_for_pending_msg(struct intel_guc *guc, bool interruptible, long timeout); +void intel_guc_submission_flush_work(struct intel_guc *guc); + static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) { return guc->submission_supported; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3872d309ed31..b8b09b1bee3e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -690,6 +690,8 @@ void intel_uc_suspend(struct intel_uc *uc) return; } + intel_guc_submission_flush_work(guc); + with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { err = intel_guc_suspend(guc); if (err) -- cgit v1.2.3 From 2f2cc53b5fe7022f3ae602eb24573d52f8740959 Mon Sep 17 00:00:00 2001 From: Alan Previn Date: Fri, 29 Dec 2023 13:51:43 -0800 Subject: drm/i915/guc: Close deregister-context race against CT-loss If we are at the end of suspend or very early in resume its possible an async fence signal (via rcu_call) is triggered to free_engines which could lead us to the execution of the context destruction worker (after a prior worker flush). Thus, when suspending, insert rcu_barriers at the start of i915_gem_suspend (part of driver's suspend prepare) and again in i915_gem_suspend_late so that all such cases have completed and context destruction list isn't missing anything. In destroyed_worker_func, close the race against CT-loss by checking that CT is enabled before calling into deregister_destroyed_contexts. Based on testing, guc_lrc_desc_unpin may still race and fail as we traverse the GuC's context-destroy list because the CT could be disabled right before calling GuC's CT send function. We've witnessed this race condition once every ~6000-8000 suspend-resume cycles while ensuring workloads that render something onscreen is continuously started just before we suspend (and the workload is small enough to complete and trigger the queued engine/context free-up either very late in suspend or very early in resume). 
In such a case, we need to unroll the entire process because guc-lrc-unpin takes a gt wakeref which only gets released in the G2H IRQ reply that never comes through in this corner case. Without the unroll, the taken wakeref is leaked and will cascade into a kernel hang later at the tail end of suspend in this function: intel_wakeref_wait_for_idle(>->wakeref) (called by) - intel_gt_pm_wait_for_idle (called by) - wait_for_suspend Thus, do an unroll in guc_lrc_desc_unpin and deregister_destroyed_- contexts if guc_lrc_desc_unpin fails due to CT send falure. When unrolling, keep the context in the GuC's destroy-list so it can get picked up on the next destroy worker invocation (if suspend aborted) or get fully purged as part of a GuC sanitization (end of suspend) or a reset flow. Signed-off-by: Alan Previn Signed-off-by: Anshuman Gupta Tested-by: Mousumi Jana Acked-by: Daniele Ceraolo Spurio Reviewed-by: Rodrigo Vivi Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20231229215143.581619-1-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 10 ++++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 73 +++++++++++++++++++++-- 2 files changed, 78 insertions(+), 5 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 0d812f4d787d..3b27218aabe2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -28,6 +28,13 @@ void i915_gem_suspend(struct drm_i915_private *i915) GEM_TRACE("%s\n", dev_name(i915->drm.dev)); intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0); + /* + * On rare occasions, we've observed the fence completion triggers + * free_engines asynchronously via rcu_call. Ensure those are done. + * This path is only called on suspend, so it's an acceptable cost. + */ + rcu_barrier(); + flush_workqueue(i915->wq); /* @@ -160,6 +167,9 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ + /* Like i915_gem_suspend, flush tasks staged from fence triggers */ + rcu_barrier(); + for_each_gt(gt, i915, i) intel_gt_suspend_late(gt); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 9c64ae0766cc..cae637fc3ead 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -236,6 +236,13 @@ set_context_destroyed(struct intel_context *ce) ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; } +static inline void +clr_context_destroyed(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED; +} + static inline bool context_pending_disable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; @@ -613,6 +620,8 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, u32 g2h_len_dw, bool loop) { + int ret; + /* * We always loop when a send requires a reply (i.e. 
g2h_len_dw > 0), * so we don't handle the case where we don't get a reply because we @@ -623,7 +632,11 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, if (g2h_len_dw) atomic_inc(&guc->outstanding_submission_g2h); - return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + if (ret) + atomic_dec(&guc->outstanding_submission_g2h); + + return ret; } int intel_guc_wait_for_pending_msg(struct intel_guc *guc, @@ -3288,12 +3301,13 @@ static void guc_context_close(struct intel_context *ce) spin_unlock_irqrestore(&ce->guc_state.lock, flags); } -static inline void guc_lrc_desc_unpin(struct intel_context *ce) +static inline int guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); struct intel_gt *gt = guc_to_gt(guc); unsigned long flags; bool disabled; + int ret; GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); @@ -3304,18 +3318,41 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) spin_lock_irqsave(&ce->guc_state.lock, flags); disabled = submission_disabled(guc); if (likely(!disabled)) { + /* + * Take a gt-pm ref and change context state to be destroyed. + * NOTE: a G2H IRQ that comes after will put this gt-pm ref back + */ __intel_gt_pm_get(gt); set_context_destroyed(ce); clr_context_registered(ce); } spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { release_guc_id(guc, ce); __guc_context_destroy(ce); - return; + return 0; } - deregister_context(ce, ce->guc_id.id); + /* + * GuC is active, lets destroy this context, but at this point we can still be racing + * with suspend, so we undo everything if the H2G fails in deregister_context so + * that GuC reset will find this context during clean up. + */ + ret = deregister_context(ce, ce->guc_id.id); + if (ret) { + spin_lock(&ce->guc_state.lock); + set_context_registered(ce); + clr_context_destroyed(ce); + spin_unlock(&ce->guc_state.lock); + /* + * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements + * the wakeref immediately but per function spec usage call this after unlock. + */ + intel_wakeref_put_async(>->wakeref); + } + + return ret; } static void __guc_context_destroy(struct intel_context *ce) @@ -3383,7 +3420,22 @@ static void deregister_destroyed_contexts(struct intel_guc *guc) if (!ce) break; - guc_lrc_desc_unpin(ce); + if (guc_lrc_desc_unpin(ce)) { + /* + * This means GuC's CT link severed mid-way which could happen + * in suspend-resume corner cases. In this case, put the + * context back into the destroyed_contexts list which will + * get picked up on the next context deregistration event or + * purged in a GuC sanitization event (reset/unload/wedged/...). + */ + spin_lock_irqsave(&guc->submission_state.lock, flags); + list_add_tail(&ce->destroyed_link, + &guc->submission_state.destroyed_contexts); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); + /* Bail now since the list might never be emptied if h2gs fail */ + break; + } + } } @@ -3394,6 +3446,17 @@ static void destroyed_worker_func(struct work_struct *w) struct intel_gt *gt = guc_to_gt(guc); intel_wakeref_t wakeref; + /* + * In rare cases we can get here via async context-free fence-signals that + * come very late in suspend flow or very early in resume flows. 
In these + * cases, GuC won't be ready but just skipping it here is fine as these + * pending-destroy-contexts get destroyed totally at GuC reset time at the + * end of suspend.. OR.. this worker can be picked up later on the next + * context destruction trigger after resume-completes + */ + if (!intel_guc_is_ready(guc)) + return; + with_intel_gt_pm(gt, wakeref) deregister_destroyed_contexts(guc); } -- cgit v1.2.3 From 0e00a8814eec16057e783170456442adde80c0b4 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Tue, 19 Dec 2023 11:59:57 -0800 Subject: drm/i915/guc: Avoid circular locking issue on busyness flush Avoid the following lockdep complaint: <4> [298.856498] ====================================================== <4> [298.856500] WARNING: possible circular locking dependency detected <4> [298.856503] 6.7.0-rc5-CI_DRM_14017-g58ac4ffc75b6+ #1 Tainted: G N <4> [298.856505] ------------------------------------------------------ <4> [298.856507] kworker/4:1H/190 is trying to acquire lock: <4> [298.856509] ffff8881103e9978 (>->reset.backoff_srcu){++++}-{0:0}, at: _intel_gt_reset_lock+0x35/0x380 [i915] <4> [298.856661] but task is already holding lock: <4> [298.856663] ffffc900013f7e58 ((work_completion)(&(&guc->timestamp.work)->work)){+.+.}-{0:0}, at: process_scheduled_works+0x264/0x530 <4> [298.856671] which lock already depends on the new lock. The complaint is not actually valid. The busyness worker thread does indeed hold the worker lock and then attempt to acquire the reset lock (which may have happened in reverse order elsewhere). However, it does so with a trylock that exits if the reset lock is not available (specifically to prevent this and other similar deadlocks). Unfortunately, lockdep does not understand the trylock semantics (the lock is an i915 specific custom implementation for resets). Not doing a synchronous flush of the worker thread when a reset is in progress resolves the lockdep splat by never even attempting to grab the lock in this particular scenario. There are situatons where a synchronous cancel is required, however. So, always do the synchronous cancel if not in reset. And add an extra synchronous cancel to the end of the reset flow to account for when a reset is occurring at driver shutdown and the cancel is no longer synchronous but could lead to unallocated memory accesses if the worker is not stopped. Signed-off-by: Zhanjun Dong Signed-off-by: John Harrison Cc: Andi Shyti Cc: Daniel Vetter Reviewed-by: Andi Shyti Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231219195957.212600-1-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 48 ++++++++++++++++++++++- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 +- 2 files changed, 48 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index cae637fc3ead..f3dcae4b9d45 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1375,7 +1375,45 @@ static void guc_enable_busyness_worker(struct intel_guc *guc) static void guc_cancel_busyness_worker(struct intel_guc *guc) { - cancel_delayed_work_sync(&guc->timestamp.work); + /* + * There are many different call stacks that can get here. Some of them + * hold the reset mutex. The busyness worker also attempts to acquire the + * reset mutex. Synchronously flushing a worker thread requires acquiring + * the worker mutex. 
Lockdep sees this as a conflict. It thinks that the + * flush can deadlock because it holds the worker mutex while waiting for + * the reset mutex, but another thread is holding the reset mutex and might + * attempt to use other worker functions. + * + * In practice, this scenario does not exist because the busyness worker + * does not block waiting for the reset mutex. It does a try-lock on it and + * immediately exits if the lock is already held. Unfortunately, the mutex + * in question (I915_RESET_BACKOFF) is an i915 implementation which has lockdep + * annotation but not to the extent of explaining the 'might lock' is also a + * 'does not need to lock'. So one option would be to add more complex lockdep + * annotations to ignore the issue (if at all possible). A simpler option is to + * just not flush synchronously when a reset is in progress. Given that the worker + * will just early exit and re-schedule itself anyway, there is no advantage + * to running it immediately. + * + * If a reset is not in progress, then the synchronous flush may be required. + * As noted, many call stacks lead here, some during suspend and driver unload + * which do require a synchronous flush to make sure the worker is stopped + * before memory is freed. + * + * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through + * every possible call stack is unfeasible. It would be too intrusive to many + * areas that really don't care about the GuC backend. However, there is the + * 'reset_in_progress' flag available, so just use that. + * + * And note that in the case of a reset occurring during driver unload + * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set) + * is fine because there is another cancel in _finish (when the reset flag is + * not). + */ + if (guc_to_gt(guc)->uc.reset_in_progress) + cancel_delayed_work(&guc->timestamp.work); + else + cancel_delayed_work_sync(&guc->timestamp.work); } static void __reset_guc_busyness_stats(struct intel_guc *guc) @@ -1966,8 +2004,16 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc) void intel_guc_submission_reset_finish(struct intel_guc *guc) { + /* + * Ensure the busyness worker gets cancelled even on a fatal wedge. + * Note that reset_prepare is not allowed to because it confuses lockdep. + */ + if (guc_submission_initialized(guc)) + guc_cancel_busyness_worker(guc); + /* Reset called during driver load or during wedge? */ if (unlikely(!guc_submission_initialized(guc) || + !intel_guc_is_fw_running(guc) || intel_gt_is_wedged(guc_to_gt(guc)))) { return; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index b8b09b1bee3e..6dfe5d9456c6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -640,7 +640,7 @@ void intel_uc_reset_finish(struct intel_uc *uc) uc->reset_in_progress = false; /* Firmware expected to be running when this function is called */ - if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc)) + if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_reset_finish(guc); } -- cgit v1.2.3 From 284781470de227e6177e491ad091d72492290a65 Mon Sep 17 00:00:00 2001 From: Juan Escamilla Date: Tue, 9 Jan 2024 17:03:00 -0800 Subject: drm/i915/gt: Use rc6.supported flag from intel_gt for rc6_enable sysfs Currently, if rc6 is supported, it gets enabled, and the sysfs files for rc6_enable_show and rc6_enable_dev_show use masks to check information from drm_i915_private.
However, rc6_support functions take more variables and conditions into consideration, and thus these masks are not enough for most modern hardware and are simply lying to the user. Let's fix it by at least using the rc6.supported flag from the intel_gt information. Signed-off-by: Juan Escamilla Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240110010302.553597-1-jcescami@wasd.net --- drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index f0dea54880af..2d3c4dab6d21 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -176,27 +176,13 @@ static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id) return DIV_ROUND_CLOSEST_ULL(res, 1000); } -static u8 get_rc6_mask(struct intel_gt *gt) -{ - u8 mask = 0; - - if (HAS_RC6(gt->i915)) - mask |= BIT(0); - if (HAS_RC6p(gt->i915)) - mask |= BIT(1); - if (HAS_RC6pp(gt->i915)) - mask |= BIT(2); - - return mask; -} - static ssize_t rc6_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", get_rc6_mask(gt)); + return sysfs_emit(buff, "%x\n", gt->rc6.supported); } static ssize_t rc6_enable_dev_show(struct device *dev, @@ -205,7 +191,7 @@ static ssize_t rc6_enable_dev_show(struct device *dev, { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", get_rc6_mask(gt)); + return sysfs_emit(buff, "%x\n", gt->rc6.supported); } static u32 __rc6_residency_ms_show(struct intel_gt *gt) -- cgit v1.2.3 From 84bf82f4f8661930a134a1d86bde16f7d8bcd699 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Mon, 8 Jan 2024 17:57:37 +0530 Subject: drm/i915/xelpg: Extend driver code of Xe_LPG to Xe_LPG+ Xe_LPG+ (IP version 12.74) should take the same general code paths as Xe_LPG (versions 12.70 and 12.71). Xe_LPG+'s workaround list will be handled by the next patch.
Signed-off-by: Harish Chegondi Signed-off-by: Haridhar Kalvala Reviewed-by: Matt Roper Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240108122738.14399-3-haridhar.kalvala@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 ++- drivers/gpu/drm/i915/gt/intel_mocs.c | 2 +- drivers/gpu/drm/i915/gt/intel_rc6.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 40687806d22a..1ade568ffbfa 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1190,7 +1190,8 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine) num = ARRAY_SIZE(xelpmp_regs); } } else { - if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || + if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) || + GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) { diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 353f93baaca0..25c1023eb5f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -495,7 +495,7 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915, memset(table, 0, sizeof(struct drm_i915_mocs_table)); table->unused_entries_index = I915_MOCS_PTE; - if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) { table->size = ARRAY_SIZE(mtl_mocs_table); table->table = mtl_mocs_table; table->n_entries = MTL_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 7090e4be29cb..8f4b3c8af09c 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -123,7 +123,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) * temporary wa and should be removed after fixing real cause * of forcewake timeouts. */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) pg_enable = GEN9_MEDIA_PG_ENABLE | GEN11_MEDIA_SAMPLER_PG_ENABLE; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index beffac46a5e2..a5410b41744c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -145,7 +145,7 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); - if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) { switch (obj->pat_index) { case 0: return " WB"; case 1: return " WT"; -- cgit v1.2.3 From c44d4ef47fdad0a33966de89f9064e19736bb52f Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 8 Jan 2024 17:57:38 +0530 Subject: drm/i915/xelpg: Extend some workarounds/tuning to gfx version 12.74 Some of our existing Xe_LPG workarounds and tuning are also applicable to the version 12.74 variant. Extend the condition bounds accordingly. Also fix the comment on Wa_14018575942 while we're at it. 
v2: Extend some more workarounds (Harish) Signed-off-by: Matt Roper Signed-off-by: Harish Chegondi Signed-off-by: Haridhar Kalvala Reviewed-by: Matt Atwood Link: https://patchwork.freedesktop.org/patch/msgid/20240108122738.14399-4-haridhar.kalvala@intel.com --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 24 +++++++++++++++--------- drivers/gpu/drm/i915/i915_perf.c | 2 +- 3 files changed, 18 insertions(+), 12 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 86a04afff64b..e1bf13e3d307 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) static int mtl_dummy_pipe_control(struct i915_request *rq) { /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) || + if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(rq->i915)) { u32 *cs; @@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) flags |= PIPE_CONTROL_FLUSH_L3; /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) /* dummy PIPE_CONTROL + depth flush */ cs = gen12_emit_pipe_control(cs, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 270b56fc85e2..91814e3abd5c 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -789,8 +789,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine, dg2_ctx_gt_tuning_init(engine, wal); - if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER)) + /* + * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in + * gen12_emit_indirect_ctx_rcs() rather than here on some early + * steppings. 
+ */ + if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) || + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))) wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false); } @@ -911,7 +916,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) goto done; - if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_ctx_workarounds_init(engine, wal); else if (IS_PONTEVECCHIO(i915)) ; /* noop; none at this time */ @@ -1646,7 +1651,7 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - /* Wa_14018778641 / Wa_18018781329 */ + /* Wa_14018575942 / Wa_18018781329 */ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); /* Wa_22016670082 */ @@ -1713,7 +1718,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) */ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal) { - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS); } @@ -1746,7 +1751,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) return; } - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_gt_workarounds_init(gt, wal); else if (IS_PONTEVECCHIO(i915)) pvc_gt_workarounds_init(gt, wal); @@ -2219,7 +2224,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) if (engine->gt->type == GT_MEDIA) ; /* none yet */ - else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_whitelist_build(engine); else if (IS_PONTEVECCHIO(i915)) pvc_whitelist_build(engine); @@ -2831,7 +2836,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, { struct drm_i915_private *i915 = gt->i915; - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512); /* @@ -2884,7 +2889,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li } if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER)) + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) || + IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) /* Wa_14017856879 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index dbbc95d18550..9cefdc845bdb 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915) struct intel_gt *gt = to_gt(i915); /* Wa_18013179988 */ - if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { intel_wakeref_t wakeref; u32 reg, shift; -- cgit v1.2.3 From 6d46d09a0d7dd412c5b76f74f89fe4448ba2117e Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Fri, 19 Jan 2024 11:35:13 -0800 Subject: drm/i915/mtl: Wake GT 
before sending H2G message Instead of waiting until the interrupt reaches GuC, we can grab a forcewake while triggering the H2G interrupt. GEN11_GUC_HOST_INTERRUPT is inside sgunit and is not affected by forcewakes. However, there could be some delays when the platform is entering/exiting some higher level platform sleep states and an H2G is triggered. A forcewake ensures those sleep states have been fully exited and further processing occurs as expected. The hysteresis timers for C6 and higher sleep states will ensure there is no unwanted race between the wake and processing of the interrupts by GuC. This will have an official WA soon, so add a FIXME in the comments. v2: Make the new ranges watertight to address BAT failures and update commit message (Matt R). Reviewed-by: Matt Roper Signed-off-by: Vinay Belgaumkar Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20240119193513.221730-1-vinay.belgaumkar@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index dfefad5a5fec..76400e9c40f0 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1800,7 +1800,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = { GEN_FW_RANGE(0x24000, 0x2ffff, 0), /* 0x24000 - 0x2407f: always on 0x24080 - 0x2ffff: reserved */ - GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT) + GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), + GEN_FW_RANGE(0x40000, 0x1901ef, 0), + GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT) + /* FIXME: WA to wake GT while triggering H2G */ }; /* -- cgit v1.2.3 From 4104e634bba446a7b32b4accc980d97dde849f0d Mon Sep 17 00:00:00 2001 From: Juan Escamilla Date: Tue, 16 Jan 2024 09:29:19 -0800 Subject: drm/i915/gt: Reflect the true and current status of rc6_enable The sysfs file is named 'enabled', thus users might want to know the true state of RC6 rather than only an indication of whether RC6 should be enabled. Let's use rc6.enabled directly instead of rc6.supported.
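For context, this attribute is consumed from userspace as a plain sysfs read; a minimal sketch follows. The path is an assumption (card and GT numbering vary per system); the point of the change below is that the value read reflects whether RC6 is actually enabled rather than a capability mask.

    /*
     * Minimal sketch, not part of the driver: the sysfs path is assumed and
     * varies with card/GT numbering.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/drm/card0/gt/gt0/rc6_enable";
            FILE *f = fopen(path, "r");
            unsigned int val;

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%x", &val) == 1)
                    printf("rc6_enable: %u\n", val);
            fclose(f);
            return 0;
    }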
Signed-off-by: Juan Escamilla Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240116172922.3460695-1-jcescami@wasd.net --- drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index 2d3c4dab6d21..c0b202223940 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -182,7 +182,7 @@ static ssize_t rc6_enable_show(struct kobject *kobj, { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", gt->rc6.supported); + return sysfs_emit(buff, "%x\n", gt->rc6.enabled); } static ssize_t rc6_enable_dev_show(struct device *dev, @@ -191,7 +191,7 @@ static ssize_t rc6_enable_dev_show(struct device *dev, { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", gt->rc6.supported); + return sysfs_emit(buff, "%x\n", gt->rc6.enabled); } static u32 __rc6_residency_ms_show(struct intel_gt *gt) -- cgit v1.2.3 From d2435a8e3d683adb9143b9ad3c416ac3a4ca9688 Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Thu, 8 Feb 2024 19:13:18 +0100 Subject: drm/i915: Add flex arrays to struct i915_syncmap The "struct i915_syncmap" uses a dynamically sized set of trailing elements. It can use a "u32" array or a "struct i915_syncmap *" array. So, use the kernel's preferred way of declaring flexible arrays [1]. Because there are two possibilities for the trailing arrays, it is necessary to declare a union and use the DECLARE_FLEX_ARRAY macro. The comment can be removed as the union is now clear enough. Also, avoid the open-coded arithmetic in the memory allocator functions [2] using the "struct_size" macro. Moreover, refactor the "__sync_seqno" and "__sync_child" functions since it is now possible to use the union members added to the structure. This way, it is also possible to avoid the open-coded arithmetic in pointers.
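The pattern being adopted is easier to see outside the driver. Below is a minimal userspace sketch of a trailing flexible array sized with a struct_size()-style computation; it is an illustration only. The kernel's struct_size() additionally performs overflow checking, DECLARE_FLEX_ARRAY() is what further allows placing such arrays inside a union as the patch does, and KSYNCMAP = 16 is assumed here.

    /*
     * Minimal sketch, not driver code: plain C version of the flexible-array
     * plus struct_size() pattern.  KSYNCMAP = 16 is an assumption, and
     * STRUCT_SIZE() lacks the overflow checking of the kernel macro.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define KSYNCMAP 16

    struct node {
            unsigned int height;
            unsigned int bitmap;
            unsigned int seqno[];           /* trailing flexible array member */
    };

    #define STRUCT_SIZE(ptr, member, n) \
            (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

    int main(void)
    {
            struct node *p = calloc(1, STRUCT_SIZE(p, seqno, KSYNCMAP));

            if (!p)
                    return 1;
            p->seqno[KSYNCMAP - 1] = 42;    /* trailing storage addressed by name */
            printf("seqno[%d] = %u\n", KSYNCMAP - 1, p->seqno[KSYNCMAP - 1]);
            free(p);
            return 0;
    }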
Link: https://www.kernel.org/doc/html/next/process/deprecated.html#zero-length-and-one-element-arrays [1] Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [2] Signed-off-by: Erick Archer Reviewed-by: Kees Cook Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20240208181318.4259-1-erick.archer@gmx.com --- drivers/gpu/drm/i915/i915_syncmap.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c index 60404dbb2e9f..df6437c37373 100644 --- a/drivers/gpu/drm/i915/i915_syncmap.c +++ b/drivers/gpu/drm/i915/i915_syncmap.c @@ -75,13 +75,10 @@ struct i915_syncmap { unsigned int height; unsigned int bitmap; struct i915_syncmap *parent; - /* - * Following this header is an array of either seqno or child pointers: - * union { - * u32 seqno[KSYNCMAP]; - * struct i915_syncmap *child[KSYNCMAP]; - * }; - */ + union { + DECLARE_FLEX_ARRAY(u32, seqno); + DECLARE_FLEX_ARRAY(struct i915_syncmap *, child); + }; }; /** @@ -99,13 +96,13 @@ void i915_syncmap_init(struct i915_syncmap **root) static inline u32 *__sync_seqno(struct i915_syncmap *p) { GEM_BUG_ON(p->height); - return (u32 *)(p + 1); + return p->seqno; } static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p) { GEM_BUG_ON(!p->height); - return (struct i915_syncmap **)(p + 1); + return p->child; } static inline unsigned int @@ -200,7 +197,7 @@ __sync_alloc_leaf(struct i915_syncmap *parent, u64 id) { struct i915_syncmap *p; - p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL); + p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL); if (unlikely(!p)) return NULL; @@ -282,7 +279,7 @@ static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno) unsigned int above; /* Insert a join above the current layer */ - next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next), + next = kzalloc(struct_size(next, child, KSYNCMAP), GFP_KERNEL); if (unlikely(!next)) return -ENOMEM; -- cgit v1.2.3 From 599b0d8ce6edacc5991c552d68c6404b2e150cab Mon Sep 17 00:00:00 2001 From: Anirban Sk Date: Mon, 12 Feb 2024 10:37:38 +0530 Subject: drm/i915/selftests: Increasing the sleep time for live_rc6_manual Sometimes the gt_pm live_rc6_manual selftest fails due to no power being measured for the rc6 disabled period. Therefore, increase the rc6 disable period from 250ms to 1000ms to rule out such sporadic failures.
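The measurement the selftest relies on can be mimicked from userspace, which makes the motivation for a longer window clearer: an energy counter is sampled before and after a sleep, and a longer sleep accumulates more energy relative to counter granularity and background noise. A rough sketch, assuming the standard powercap RAPL path (which differs per system):

    /*
     * Minimal sketch, not the selftest: samples a RAPL energy counter around
     * a sleep.  The powercap path is an assumption and counter wraparound is
     * ignored.
     */
    #include <stdio.h>
    #include <unistd.h>

    static long long read_energy_uj(void)
    {
            FILE *f = fopen("/sys/class/powercap/intel-rapl:0/energy_uj", "r");
            long long uj = -1;

            if (f) {
                    if (fscanf(f, "%lld", &uj) != 1)
                            uj = -1;
                    fclose(f);
            }
            return uj;
    }

    int main(void)
    {
            long long before = read_energy_uj();
            long long after;

            sleep(1);                       /* 1000 ms window, as in the selftest */
            after = read_energy_uj();

            if (before >= 0 && after >= before)
                    printf("average power: %lld uW\n", after - before); /* uJ over 1 s == uW */
            return 0;
    }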
v3: - More descriptive and improved commit message (Anshuman) Signed-off-by: Anirban Sk Reviewed-by: Anshuman Gupta Signed-off-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20240212050738.1162198-1-sk.anirban@intel.com --- drivers/gpu/drm/i915/gt/selftest_rc6.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index a7189c2d660c..1aa1446c8fb0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -62,12 +62,12 @@ int live_rc6_manual(void *arg) dt = ktime_get(); rc0_power = librapl_energy_uJ(); - msleep(250); + msleep(1000); rc0_power = librapl_energy_uJ() - rc0_power; dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); if ((res[1] - res[0]) >> 10) { - pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n", + pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n", (res[1] - res[0]) >> 10); err = -EINVAL; goto out_unlock; -- cgit v1.2.3 From b112364867499e1327801da200868a6c506465fa Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 8 Feb 2024 08:25:10 +0000 Subject: drm/i915: Add GuC submission interface version query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new query to the GuC submission interface version. Mesa intends to use this information to check for old firmware versions with a known bug where using the render and compute command streamers simultaneously can cause GPU hangs due to issues in firmware scheduling. Based on patches from Vivaik and Joonas. Compile tested only. v2: * Added branch version. Signed-off-by: Tvrtko Ursulin Cc: Kenneth Graunke Cc: Jose Souza Cc: Sagar Ghuge Cc: Paulo Zanoni Cc: John Harrison Cc: Rodrigo Vivi Cc: Jani Nikula Cc: Tvrtko Ursulin Cc: Vivaik Balasubrawmanian Cc: Joonas Lahtinen Reviewed-by: José Roberto de Souza Tested-by: José Roberto de Souza Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20240208082510.1363268-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_query.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 00871ef99792..d4dba1240b40 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -551,6 +551,38 @@ static int query_hwconfig_blob(struct drm_i915_private *i915, return hwconfig->size; } +static int +query_guc_submission_version(struct drm_i915_private *i915, + struct drm_i915_query_item *query) +{ + struct drm_i915_query_guc_submission_version __user *query_ptr = + u64_to_user_ptr(query->data_ptr); + struct drm_i915_query_guc_submission_version ver; + struct intel_guc *guc = &to_gt(i915)->uc.guc; + const size_t size = sizeof(ver); + int ret; + + if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc)) + return -ENODEV; + + ret = copy_query_item(&ver, size, size, query); + if (ret != 0) + return ret; + + if (ver.branch || ver.major || ver.minor || ver.patch) + return -EINVAL; + + ver.branch = 0; + ver.major = guc->submission_version.major; + ver.minor = guc->submission_version.minor; + ver.patch = guc->submission_version.patch; + + if (copy_to_user(query_ptr, &ver, size)) + return -EFAULT; + + return 0; +} + static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) =
{ query_topology_info, @@ -559,6 +591,7 @@ static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, query_memregion_info, query_hwconfig_blob, query_geometry_subslices, + query_guc_submission_version, }; int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) -- cgit v1.2.3 From eb927f01dfb6309c8a184593c2c0618c4000c481 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Wed, 10 Jan 2024 13:02:16 -0800 Subject: drm/i915/gt: Restart the heartbeat timer when forcing a pulse The context persistence code does things like send super high priority heartbeat pulses to ensure any leaked context can still be pre-empted and thus isn't a total denial of service but only a minor denial of service. Unfortunately, it wasn't bothering to restart the heartbeat worker with a fresh timeout. Thus, if a persistent context happened to be closed just before the heartbeat was going to go ping anyway, then the forced pulse would get a negligible execution time. And as the forced pulse is super high priority, the worker thread's next step is a reset, which means a potentially innocent system randomly goes boom when attempting to close a context. So, force a re-schedule of the worker thread with the appropriate timeout. Signed-off-by: John Harrison Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20240110210216.4125092-1-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 1a8e2b7db013..4ae2fa0b61dd 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -290,6 +290,9 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine) heartbeat_commit(rq, &attr); GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); + /* Ensure the forced pulse gets a full period to execute */ + next_heartbeat(engine); + return 0; } -- cgit v1.2.3
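Returning to the GuC submission interface version query added above: a hedged sketch of how userspace might exercise it follows. The render node path and the query id value are assumptions (the uapi name is expected to be DRM_I915_QUERY_GUC_SUBMISSION_VERSION; check the i915_drm.h shipped with your kernel), and note that i915 reports per-item errors through the written-back length field rather than the ioctl return value.

    /*
     * Minimal sketch, not a reference implementation: device path and query id
     * are assumptions, and struct drm_i915_query_guc_submission_version must be
     * available from your uapi headers.
     */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    int main(void)
    {
            struct drm_i915_query_guc_submission_version ver;
            struct drm_i915_query_item item;
            struct drm_i915_query query;
            int fd = open("/dev/dri/renderD128", O_RDWR);   /* assumed node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&ver, 0, sizeof(ver));   /* kernel rejects non-zero input fields */
            memset(&item, 0, sizeof(item));
            item.query_id = 7;              /* assumed DRM_I915_QUERY_GUC_SUBMISSION_VERSION */
            item.length = sizeof(ver);
            item.data_ptr = (uint64_t)(uintptr_t)&ver;

            memset(&query, 0, sizeof(query));
            query.num_items = 1;
            query.items_ptr = (uint64_t)(uintptr_t)&item;

            if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0 && item.length >= 0)
                    printf("GuC submission interface %u.%u.%u (branch %u)\n",
                           ver.major, ver.minor, ver.patch, ver.branch);
            close(fd);
            return 0;
    }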