| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-21 14:56:17 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-21 14:56:17 -0800 |
| commit | 28eb75e178d389d325f1666e422bc13bbbb9804c (patch) | |
| tree | 20417b4e798f98fc5687e80c1e0126afcf437c70 /drivers/gpu/drm/xe/xe_hw_engine.c | |
| parent | 071b34dcf71523a559b6c39f5d21a268a9531b50 (diff) | |
| parent | a163b895077861598be48c1cf7f4a88413c28b22 (diff) | |
Merge tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
"There's a lot of rework, the panic helper support is being added to
more drivers, v3d gets support for HW superpages, scheduler
documentation, drm client and video aperture reworks, some new
MAINTAINERS added, amdgpu has the usual lots of IP refactors, Intel
has some Pantherlake enablement and xe is getting some SRIOV bits, but
just lots of stuff everywhere.
core:
- split DSC helpers from DP helpers
- clang build fixes for drm/mm test
- drop simple pipeline support for gem vram
- document submission error signaling
- move drm_rect to drm core module from kms helper
- add default client setup to most drivers
- move to video aperture helpers instead of drm ones
tests:
- new framebuffer tests
ttm:
- remove swapped and pinned BOs from TTM lru
panic:
- fix uninit spinlock
- add ABGR2101010 support
bridge:
- add TI TDP158 support
- use standard PM OPS
dma-fence:
- use read_trylock instead of read_lock to help lockdep
scheduler:
- add errno to sched start to report different errors
- add locking to drm_sched_entity_modify_sched
- improve documentation
xe:
- add drm_line_printer
- lots of refactoring
- Enable Xe2 + PES disaggregation
- add new ARL PCI ID
- SRIOV development work
- fix exec unnecessary implicit fence
- define and parse OA sync props
- forcewake refactoring
i915:
- Enable BMG/LNL ultra joiner
- Enable 10bpc + CCS scanout on ICL+, fp16/CCS on TGL+
- use DSB for plane/color mgmt
- Arrow lake PCI IDs
- lots of i915/xe display refactoring
- enable PXP GuC autoteardown
- Pantherlake (PTL) Xe3 LPD display enablement
- Allow fastset HDR infoframe changes
- write DP source OUI for non-eDP sinks
- share PCI IDs between i915 and xe
amdgpu:
- SDMA queue reset support
- SMU 13.0.6, JPEG 4.0.3 updates
- Initial runtime repartitioning support
- rework IP structs for multiple IP instances
- Fetch EDID from _DDC if available
- SMU13 zero rpm user control
- lots of fixes/cleanups
amdkfd:
- Increase event FIFO size
- add topology cap flag for per queue reset
msm:
- DPU:
- SA8775P support
- (disabled by default) MSM8917, MSM8937, MSM8953 and MSM8996 support
- Enable large framebuffer support
- Drop MSM8998 and SDM845
- DP:
- SA8775P support
- GPU:
- a7xx preemption support
- Adreno A663 support
ast:
- warn about unsupported TX chips
ivpu:
- add coredump
- add pantherlake support
rockchip:
- 4K@60Hz display enablement
- generate pll programming tables
panthor:
- add timestamp query API
- add realtime group priority
- add fdinfo support
etnaviv:
- improve handling of DMA address limits
- improve GPU hangcheck
exynos:
- Decon Exynos7870 support
mediatek:
- add OF graph support
omap:
- locking fixes
bochs:
- convert to gem/shmem from simpledrm
v3d:
- support big/super pages
- add gemfs
vc4:
- BCM2712 support refactoring
- add YUV444 format support
udmabuf:
- folio related fixes
nouveau:
- add panic support on nv50+"
* tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel: (1583 commits)
drm/xe/guc: Fix dereference before NULL check
drm/amd: Fix initialization mistake for NBIO 7.7.0
Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
drm/amd/display: Fix failure to read vram info due to static BP_RESULT
drm/amdgpu: enable GTT fallback handling for dGPUs only
drm/amd/amdgpu: limit single process inside MES
drm/fourcc: add AMD_FMT_MOD_TILE_GFX9_4K_D_X
drm/amdgpu/mes12: correct kiq unmap latency
drm/amdgpu: Support vcn and jpeg error info parsing
drm/amd : Update MES API header file for v11 & v12
drm/amd/amdkfd: add/remove kfd queues on start/stop KFD scheduling
drm/amdkfd: change kfd process kref count at creation
drm/amdgpu: Cleanup shift coding style
drm/amd/amdgpu: Increase MES log buffer to dump mes scratch data
drm/amdgpu: Implement virt req_ras_err_count
drm/amdgpu: VF Query RAS Caps from Host if supported
drm/amdgpu: Add msg handlers for SRIOV RAS Telemetry
drm/amdgpu: Update SRIOV Exchange Headers for RAS Telemetry Support
drm/amd/display: 3.2.309
drm/amd/display: Adjust VSDB parser for replay feature
...
Diffstat (limited to 'drivers/gpu/drm/xe/xe_hw_engine.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/xe/xe_hw_engine.c | 307 |
1 file changed, 70 insertions(+), 237 deletions(-)
```diff
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index c9c3beb3ce8d..1557acee3523 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -12,6 +12,7 @@
 
 #include "regs/xe_engine_regs.h"
 #include "regs/xe_gt_regs.h"
+#include "regs/xe_irq_regs.h"
 #include "xe_assert.h"
 #include "xe_bo.h"
 #include "xe_device.h"
@@ -23,6 +24,7 @@
 #include "xe_gt_printk.h"
 #include "xe_gt_mcr.h"
 #include "xe_gt_topology.h"
+#include "xe_guc_capture.h"
 #include "xe_hw_engine_group.h"
 #include "xe_hw_fence.h"
 #include "xe_irq.h"
@@ -295,7 +297,7 @@ void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
 
 	reg.addr += hwe->mmio_base;
 
-	xe_mmio_write32(hwe->gt, reg, val);
+	xe_mmio_write32(&hwe->gt->mmio, reg, val);
 }
 
 /**
@@ -315,7 +317,7 @@ u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
 
 	reg.addr += hwe->mmio_base;
 
-	return xe_mmio_read32(hwe->gt, reg);
+	return xe_mmio_read32(&hwe->gt->mmio, reg);
 }
 
 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
@@ -324,7 +326,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
 		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
 
 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
-		xe_mmio_write32(hwe->gt, RCU_MODE,
+		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
 				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
 
 	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
@@ -354,7 +356,7 @@ static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
 	    hwe->class != XE_ENGINE_CLASS_RENDER)
 		return false;
 
-	return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
 }
 
 void
@@ -460,6 +462,30 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
 	xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
 }
 
+static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
+{
+	const struct engine_info *info;
+	enum xe_hw_engine_id id;
+
+	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+		info = &engine_infos[id];
+		if (info->class == class && info->instance == instance)
+			return info;
+	}
+
+	return NULL;
+}
+
+static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
+{
+	/* For MSI-X, hw engines report to offset of engine instance zero */
+	const struct engine_info *info = find_engine_info(class, 0);
+
+	xe_gt_assert(gt, info);
+
+	return info ? info->irq_offset : 0;
+}
+
 static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
 				 enum xe_hw_engine_id id)
 {
@@ -479,7 +505,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
 	hwe->class = info->class;
 	hwe->instance = info->instance;
 	hwe->mmio_base = info->mmio_base;
-	hwe->irq_offset = info->irq_offset;
+	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
+		get_msix_irq_offset(gt, info->class) :
+		info->irq_offset;
 	hwe->domain = info->domain;
 	hwe->name = info->name;
 	hwe->fence_irq = &gt->fence_irq[info->class];
@@ -612,7 +640,7 @@ static void read_media_fuses(struct xe_gt *gt)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
+	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
 
 	/*
 	 * Pre-Xe_HP platforms had register bits representing absent engines,
@@ -657,7 +685,7 @@ static void read_copy_fuses(struct xe_gt *gt)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
+	bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
 	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
 
 	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
@@ -704,7 +732,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
 	struct xe_device *xe = gt_to_xe(gt);
 	u32 ccs_mask;
 
-	ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
+	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
 	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
 
 	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
@@ -742,8 +770,8 @@ static void check_gsc_availability(struct xe_gt *gt)
 		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
 
 		/* interrupts where previously enabled, so turn them off */
-		xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0);
-		xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0);
+		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
+		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
 
 		drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
 	}
@@ -798,60 +826,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
 	xe_hw_fence_irq_run(hwe->fence_irq);
 }
 
-static bool
-is_slice_common_per_gslice(struct xe_device *xe)
-{
-	return GRAPHICS_VERx100(xe) >= 1255;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
-				       struct xe_hw_engine_snapshot *snapshot)
-{
-	struct xe_gt *gt = hwe->gt;
-	struct xe_device *xe = gt_to_xe(gt);
-	unsigned int dss;
-	u16 group, instance;
-
-	snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
-
-	if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
-		return;
-
-	if (is_slice_common_per_gslice(xe) == false) {
-		snapshot->reg.instdone.slice_common[0] =
-			xe_mmio_read32(gt, SC_INSTDONE);
-		snapshot->reg.instdone.slice_common_extra[0] =
-			xe_mmio_read32(gt, SC_INSTDONE_EXTRA);
-		snapshot->reg.instdone.slice_common_extra2[0] =
-			xe_mmio_read32(gt, SC_INSTDONE_EXTRA2);
-	} else {
-		for_each_geometry_dss(dss, gt, group, instance) {
-			snapshot->reg.instdone.slice_common[dss] =
-				xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance);
-			snapshot->reg.instdone.slice_common_extra[dss] =
-				xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance);
-			snapshot->reg.instdone.slice_common_extra2[dss] =
-				xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance);
-		}
-	}
-
-	for_each_geometry_dss(dss, gt, group, instance) {
-		snapshot->reg.instdone.sampler[dss] =
-			xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance);
-		snapshot->reg.instdone.row[dss] =
-			xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance);
-
-		if (GRAPHICS_VERx100(xe) >= 1255)
-			snapshot->reg.instdone.geom_svg[dss] =
-				xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT,
-						       group, instance);
-	}
-}
-
 /**
  * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
  * @hwe: Xe HW Engine.
+ * @job: The job object.
  *
  * This can be printed out in a later stage like during dev_coredump
  * analysis.
@@ -860,11 +838,10 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
  * caller, using `xe_hw_engine_snapshot_free`.
  */
 struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job)
 {
 	struct xe_hw_engine_snapshot *snapshot;
-	size_t len;
-	u64 val;
+	struct __guc_capture_parsed_output *node;
 
 	if (!xe_hw_engine_is_valid(hwe))
 		return NULL;
@@ -874,28 +851,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
 	if (!snapshot)
 		return NULL;
 
-	/* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h and it
-	 * includes xe_hw_engine_types.h the length of this 3 registers can't be
-	 * set in struct xe_hw_engine_snapshot, so here doing additional
-	 * allocations.
-	 */
-	len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32));
-	snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC);
-	snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC);
-	snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC);
-	snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC);
-	snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC);
-	snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC);
-	if (!snapshot->reg.instdone.slice_common ||
-	    !snapshot->reg.instdone.slice_common_extra ||
-	    !snapshot->reg.instdone.slice_common_extra2 ||
-	    !snapshot->reg.instdone.sampler ||
-	    !snapshot->reg.instdone.row ||
-	    !snapshot->reg.instdone.geom_svg) {
-		xe_hw_engine_snapshot_free(snapshot);
-		return NULL;
-	}
-
 	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
 	snapshot->hwe = hwe;
 	snapshot->logical_instance = hwe->logical_instance;
@@ -903,157 +858,32 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
 	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
 						    hwe->domain);
 	snapshot->mmio_base = hwe->mmio_base;
+	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
 
 	/* no more VF accessible data below this point */
 	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
 		return snapshot;
 
-	snapshot->reg.ring_execlist_status =
-		xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
-	val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
-	snapshot->reg.ring_execlist_status |= val << 32;
-
-	snapshot->reg.ring_execlist_sq_contents =
-		xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
-	val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
-	snapshot->reg.ring_execlist_sq_contents |= val << 32;
-
-	snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0));
-	val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
-	snapshot->reg.ring_acthd |= val << 32;
-
-	snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0));
-	val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
-	snapshot->reg.ring_bbaddr |= val << 32;
-
-	snapshot->reg.ring_dma_fadd =
-		xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
-	val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
-	snapshot->reg.ring_dma_fadd |= val << 32;
-
-	snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
-	snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
-	snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0));
-	if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) {
-		val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0));
-		snapshot->reg.ring_start |= val << 32;
-	}
-	if (xe_gt_has_indirect_ring_state(hwe->gt)) {
-		snapshot->reg.indirect_ring_state =
-			xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
-	}
-
-	snapshot->reg.ring_head =
-		xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
-	snapshot->reg.ring_tail =
-		xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
-	snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0));
-	snapshot->reg.ring_mi_mode =
-		xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
-	snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0));
-	snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0));
-	snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0));
-	snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0));
-	snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0));
-	snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0));
-	xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
-
-	if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
-		snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
-
-	return snapshot;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
-{
-	struct xe_gt *gt = snapshot->hwe->gt;
-	struct xe_device *xe = gt_to_xe(gt);
-	u16 group, instance;
-	unsigned int dss;
-
-	drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring);
-
-	if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
-		return;
-
-	if (is_slice_common_per_gslice(xe) == false) {
-		drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n",
-			   snapshot->reg.instdone.slice_common[0]);
-		drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n",
-			   snapshot->reg.instdone.slice_common_extra[0]);
-		drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n",
-			   snapshot->reg.instdone.slice_common_extra2[0]);
-	} else {
-		for_each_geometry_dss(dss, gt, group, instance) {
-			drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss,
-				   snapshot->reg.instdone.slice_common[dss]);
-			drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss,
-				   snapshot->reg.instdone.slice_common_extra[dss]);
-			drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss,
-				   snapshot->reg.instdone.slice_common_extra2[dss]);
+	if (job) {
+		/* If got guc capture, set source to GuC */
+		node = xe_guc_capture_get_matching_and_lock(job);
+		if (node) {
+			struct xe_device *xe = gt_to_xe(hwe->gt);
+			struct xe_devcoredump *coredump = &xe->devcoredump;
+
+			coredump->snapshot.matched_node = node;
+			snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
+			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
+			return snapshot;
 		}
 	}
 
-	for_each_geometry_dss(dss, gt, group, instance) {
-		drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss,
-			   snapshot->reg.instdone.sampler[dss]);
-		drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss,
-			   snapshot->reg.instdone.row[dss]);
-
-		if (GRAPHICS_VERx100(xe) >= 1255)
-			drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n",
-				   dss, snapshot->reg.instdone.geom_svg[dss]);
-	}
-}
+	/* otherwise, do manual capture */
+	xe_engine_manual_capture(hwe, snapshot);
+	snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
+	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
 
-/**
- * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
- * @snapshot: Xe HW Engine snapshot object.
- * @p: drm_printer where it will be printed out.
- *
- * This function prints out a given Xe HW Engine snapshot object.
- */
-void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
-				 struct drm_printer *p)
-{
-	if (!snapshot)
-		return;
-
-	drm_printf(p, "%s (physical), logical instance=%d\n",
-		   snapshot->name ? snapshot->name : "",
-		   snapshot->logical_instance);
-	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
-		   snapshot->forcewake.domain, snapshot->forcewake.ref);
-	drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
-	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
-	drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
-		   snapshot->reg.ring_execlist_status);
-	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
-		   snapshot->reg.ring_execlist_sq_contents);
-	drm_printf(p, "\tRING_START: 0x%016llx\n", snapshot->reg.ring_start);
-	drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
-	drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
-	drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
-	drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
-	drm_printf(p, "\tRING_MODE: 0x%08x\n",
-		   snapshot->reg.ring_mode);
-	drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
-	drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
-	drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
-	drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
-	drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
-	drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
-	drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
-	drm_printf(p, "\tINDIRECT_RING_STATE: 0x%08x\n",
-		   snapshot->reg.indirect_ring_state);
-	drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
-	xe_hw_engine_snapshot_instdone_print(snapshot, p);
-
-	if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
-		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
-			   snapshot->reg.rcu_mode);
-	drm_puts(p, "\n");
+	return snapshot;
 }
 
 /**
@@ -1065,15 +895,18 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
  */
 void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
 {
+	struct xe_gt *gt;
 	if (!snapshot)
 		return;
 
-	kfree(snapshot->reg.instdone.slice_common);
-	kfree(snapshot->reg.instdone.slice_common_extra);
-	kfree(snapshot->reg.instdone.slice_common_extra2);
-	kfree(snapshot->reg.instdone.sampler);
-	kfree(snapshot->reg.instdone.row);
-	kfree(snapshot->reg.instdone.geom_svg);
+	gt = snapshot->hwe->gt;
+	/*
+	 * xe_guc_capture_put_matched_nodes is called here and from
+	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
+	 * of hw_engines - debugfs and devcoredump free.
+	 */
+	xe_guc_capture_put_matched_nodes(&gt->uc.guc);
+
 	kfree(snapshot->name);
 	kfree(snapshot);
 }
@@ -1089,8 +922,8 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
 {
 	struct xe_hw_engine_snapshot *snapshot;
 
-	snapshot = xe_hw_engine_snapshot_capture(hwe);
-	xe_hw_engine_snapshot_print(snapshot, p);
+	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
+	xe_engine_snapshot_print(snapshot, p);
 	xe_hw_engine_snapshot_free(snapshot);
 }
 
@@ -1150,7 +983,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
 
 u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
 {
-	return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base));
+	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
 }
 
 enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
```
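Taken together, the snapshot hunks above change the caller-side contract: xe_hw_engine_snapshot_capture() now takes the faulting xe_sched_job and prefers a matching GuC error-capture node, falling back to a manual register capture, and printing goes through xe_engine_snapshot_print() instead of the removed xe_hw_engine_snapshot_print(). The sketch below restates that flow for readers who do not want to trace it through the diff; it is an illustration only, the wrapper name xe_dump_engine_state() is hypothetical, and the header that declares xe_engine_snapshot_print() is an assumption, while the three xe_* calls it makes mirror xe_hw_engine_print() in the patch.

```c
/*
 * Minimal sketch of the reworked snapshot flow shown in the diff above.
 * Assumptions: the declarations are reachable via the headers below, and
 * xe_dump_engine_state() itself is a made-up wrapper for illustration.
 */
#include "xe_hw_engine.h"	/* xe_hw_engine_snapshot_capture()/free() */
#include "xe_devcoredump.h"	/* assumed home of xe_engine_snapshot_print() */

static void xe_dump_engine_state(struct xe_hw_engine *hwe,
				 struct xe_sched_job *job,
				 struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	/*
	 * With a non-NULL job (devcoredump path) the capture first tries to
	 * match and lock a GuC error-capture node; with job == NULL (the
	 * debugfs path, as in xe_hw_engine_print()) it always falls back to
	 * the manual MMIO capture.
	 */
	snapshot = xe_hw_engine_snapshot_capture(hwe, job);
	if (!snapshot)
		return;

	/* Prints either the GuC-provided or the manually captured state. */
	xe_engine_snapshot_print(snapshot, p);

	/* Also releases any GuC capture node locked during capture. */
	xe_hw_engine_snapshot_free(snapshot);
}
```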
