Diffstat (limited to 'drivers/gpu')
81 files changed, 3159 insertions, 1134 deletions
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 735bfdf4322f..7ff81aa0a1ca 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -981,6 +981,40 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm) #endif /** + * drm_gpusvm_find_vma_start() - Find start address for first VMA in range + * @gpusvm: Pointer to the GPU SVM structure + * @start: The inclusive start user address. + * @end: The exclusive end user address. + * + * Returns: The start address of first VMA within the provided range, + * ULONG_MAX otherwise. Assumes start_addr < end_addr. + */ +unsigned long +drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm, + unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = gpusvm->mm; + struct vm_area_struct *vma; + unsigned long addr = ULONG_MAX; + + if (!mmget_not_zero(mm)) + return addr; + + mmap_read_lock(mm); + + vma = find_vma_intersection(mm, start, end); + if (vma) + addr = vma->vm_start; + + mmap_read_unlock(mm); + mmput(mm); + + return addr; +} +EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start); + +/** * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range * @gpusvm: Pointer to the GPU SVM structure * @fault_addr: Fault address diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index fcc2677a4229..30ed74ad29ab 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XE - tristate "Intel Xe Graphics" + tristate "Intel Xe2 Graphics" depends on DRM && PCI && (m || (y && KUNIT=y)) depends on INTEL_VSEC || !INTEL_VSEC depends on X86_PLATFORM_DEVICES || !(X86 && ACPI) @@ -31,7 +31,6 @@ config DRM_XE select ACPI_VIDEO if X86 && ACPI select ACPI_WMI if X86 && ACPI select SYNC_FILE - select IOSF_MBI select CRC32 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER @@ -46,7 +45,8 @@ config DRM_XE select AUXILIARY_BUS select HMM_MIRROR help - Experimental driver for Intel Xe series GPUs + Driver for Intel Xe2 series GPUs and later. Experimental support + for Xe series is also available. If "M" is selected, the module will be called xe. diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug index 0d749ed44878..01735c6ece8b 100644 --- a/drivers/gpu/drm/xe/Kconfig.debug +++ b/drivers/gpu/drm/xe/Kconfig.debug @@ -86,12 +86,17 @@ config DRM_XE_KUNIT_TEST If in doubt, say "N". -config DRM_XE_LARGE_GUC_BUFFER - bool "Enable larger guc log buffer" +config DRM_XE_DEBUG_GUC + bool "Enable extra GuC related debug options" + depends on DRM_XE_DEBUG default n + select STACKDEPOT help Choose this option when debugging guc issues. - Buffer should be large enough for complex issues. + The GuC log buffer is increased to the maximum allowed, which should + be large enough for complex issues. The tracking of FAST_REQ messages + is extended to include a record of the calling stack, which is then + dumped on a FAST_REQ error notification. Recommended for driver developers only. 
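As a minimal usage sketch of the new drm_gpusvm_find_vma_start() helper above (not taken from this series; the caller below is hypothetical), a driver can clamp a requested range to the first address actually backed by a VMA:

/*
 * Minimal sketch (not from this series): svm_prefetch_start() is a
 * hypothetical caller; only drm_gpusvm_find_vma_start() is real.
 */
static bool svm_prefetch_start(struct drm_gpusvm *gpusvm,
			       unsigned long start, unsigned long end,
			       unsigned long *first)
{
	unsigned long addr = drm_gpusvm_find_vma_start(gpusvm, start, end);

	if (addr == ULONG_MAX)	/* no VMA intersects [start, end) */
		return false;

	*first = addr;
	return true;
}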
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index e4bf484d4121..f5f5775acdc0 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -139,7 +139,8 @@ xe-y += \ xe_guc_relay.o \ xe_memirq.o \ xe_sriov.o \ - xe_sriov_vf.o + xe_sriov_vf.o \ + xe_tile_sriov_vf.o xe-$(CONFIG_PCI_IOV) += \ xe_gt_sriov_pf.o \ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index 448afb86e05c..ff4f412c28d8 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -161,6 +161,37 @@ enum xe_guc_preempt_options { XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, }; +enum xe_guc_register_context_param_offsets { + XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ = 0, + XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS, + XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX, + XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS, + XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK, + XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER, + XE_GUC_REGISTER_CONTEXT_DATA_6_WQ_DESC_ADDR_UPPER, + XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER, + XE_GUC_REGISTER_CONTEXT_DATA_8_WQ_BUF_BASE_UPPER, + XE_GUC_REGISTER_CONTEXT_DATA_9_WQ_BUF_SIZE, + XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, + XE_GUC_REGISTER_CONTEXT_MSG_LEN, +}; + +enum xe_guc_register_context_multi_lrc_param_offsets { + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_0_MBZ = 0, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_1_FLAGS, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_2_PARENT_CONTEXT, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_3_ENGINE_CLASS, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_4_ENGINE_SUBMIT_MASK, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_6_WQ_DESC_ADDR_UPPER, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_8_WQ_BUF_BASE_UPPER, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_9_WQ_BUF_SIZE, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR, + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11, +}; + enum xe_guc_report_status { XE_GUC_REPORT_STATUS_UNKNOWN = 0x0, XE_GUC_REPORT_STATUS_ACKED = 0x1, diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h index 2c627a21648f..ecf748fd87df 100644 --- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h @@ -6,8 +6,7 @@ #ifndef _ABI_GUC_ERRORS_ABI_H #define _ABI_GUC_ERRORS_ABI_H -enum xe_guc_response_status { - XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0, +enum xe_guc_response { XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04, XE_GUC_RESPONSE_INVALID_STATE = 0x0A, XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B, @@ -21,12 +20,20 @@ enum xe_guc_response_status { XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41, XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50, XE_GUC_RESPONSE_INVALID_PARAMS = 0x60, + XE_GUC_RESPONSE_INVALID_CONTEXT_INDEX = 0x61, + XE_GUC_RESPONSE_INVALID_CONTEXT_REGISTRATION = 0x62, + XE_GUC_RESPONSE_INVALID_DOORBELL_ID = 0x63, + XE_GUC_RESPONSE_INVALID_ENGINE_ID = 0x64, XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70, XE_GUC_RESPONSE_INVALID_BUFFER = 0x71, + XE_GUC_RESPONSE_BUFFER_ALREADY_REGISTERED = 0x72, XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80, XE_GUC_RESPONSE_PENDING_ACTION = 0x90, + XE_GUC_RESPONSE_CONTEXT_NOT_REGISTERED = 0x100, + XE_GUC_RESPONSE_CONTEXT_ALREADY_REGISTERED = 0X101, XE_GUC_RESPONSE_INVALID_SIZE = 0x102, XE_GUC_RESPONSE_MALFORMED_KLV = 0x103, + XE_GUC_RESPONSE_INVALID_CONTEXT = 0x104, 
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105, XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106, XE_GUC_RESPONSE_VF_MIGRATED = 0x107, @@ -40,10 +47,11 @@ enum xe_guc_response_status { XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304, XE_GUC_RESPONSE_CTB_IN_USE = 0x305, XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306, + XE_GUC_RESPONSE_HW_TIMEOUT = 0x30C, XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D, XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E, XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F, - XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, + XE_GUC_RESPONSE_GENERIC_FAIL = 0xF000, }; enum xe_guc_load_status { diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index d918ae1c8061..9059b56bc23c 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -23,6 +23,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ struct xe_device *xe = xe_bo_device(bo); struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; u32 column, row; + u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]); /* TODO: Maybe rewrite so we can traverse the bo addresses sequentially, * by writing dpt/ggtt in a different order? @@ -32,10 +33,9 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ u32 src_idx = src_stride * (height - 1) + column + bo_ofs; for (row = 0; row < height; row++) { - u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_NONE]); + u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE); - iosys_map_wr(map, *dpt_ofs, u64, pte); + iosys_map_wr(map, *dpt_ofs, u64, pte | addr); *dpt_ofs += 8; src_idx -= src_stride; } @@ -55,17 +55,15 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, { struct xe_device *xe = xe_bo_device(bo); struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; - u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index) - = ggtt->pt_ops->pte_encode_bo; u32 column, row; + u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]); for (row = 0; row < height; row++) { u32 src_idx = src_stride * row + bo_ofs; for (column = 0; column < width; column++) { - iosys_map_wr(map, *dpt_ofs, u64, - pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_NONE])); + u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE); + iosys_map_wr(map, *dpt_ofs, u64, pte | addr); *dpt_ofs += 8; src_idx++; @@ -129,13 +127,13 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, return PTR_ERR(dpt); if (view->type == I915_GTT_VIEW_NORMAL) { + u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]); u32 x; for (x = 0; x < size / XE_PAGE_SIZE; x++) { - u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_NONE]); + u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE); - iosys_map_wr(&dpt->vmap, x * 8, u64, pte); + iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr); } } else if (view->type == I915_GTT_VIEW_REMAPPED) { const struct intel_remapped_info *remap_info = &view->remapped; @@ -173,15 +171,15 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo { struct xe_device *xe = xe_bo_device(bo); u32 column, row; + u64 pte = ggtt->pt_ops->pte_encode_flags(bo, xe->pat.idx[XE_CACHE_NONE]); for (column = 0; column < width; column++) { u32 src_idx = src_stride * (height - 1) + column + bo_ofs; for (row = 0; row < height; row++) { - u64 pte = 
ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, - xe->pat.idx[XE_CACHE_NONE]); + u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE); - ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte); + ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte | addr); *ggtt_ofs += XE_PAGE_SIZE; src_idx -= src_stride; } @@ -199,14 +197,15 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(fb->base.dev); - struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; + struct xe_tile *tile0 = xe_device_get_root_tile(xe); + struct xe_ggtt *ggtt = tile0->mem.ggtt; u32 align; int ret; /* TODO: Consider sharing framebuffer mapping? * embed i915_vma inside intel_framebuffer */ - xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile)); + xe_pm_runtime_get_noresume(xe); ret = mutex_lock_interruptible(&ggtt->lock); if (ret) goto out; @@ -215,29 +214,22 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) align = max_t(u32, align, SZ_64K); - if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) { - vma->node = bo->ggtt_node[ggtt->tile->id]; + if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) { + vma->node = bo->ggtt_node[tile0->id]; } else if (view->type == I915_GTT_VIEW_NORMAL) { - u32 x, size = bo->ttm.base.size; - vma->node = xe_ggtt_node_init(ggtt); if (IS_ERR(vma->node)) { ret = PTR_ERR(vma->node); goto out_unlock; } - ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0); + ret = xe_ggtt_node_insert_locked(vma->node, bo->size, align, 0); if (ret) { xe_ggtt_node_fini(vma->node); goto out_unlock; } - for (x = 0; x < size; x += XE_PAGE_SIZE) { - u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x, - xe->pat.idx[XE_CACHE_NONE]); - - ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte); - } + xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]); } else { u32 i, ggtt_ofs; const struct intel_rotation_info *rot_info = &view->rotated; @@ -271,7 +263,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, out_unlock: mutex_unlock(&ggtt->lock); out: - xe_pm_runtime_put(tile_to_xe(ggtt->tile)); + xe_pm_runtime_put(xe); return ret; } @@ -348,7 +340,7 @@ err: static void __xe_unpin_fb_vma(struct i915_vma *vma) { - u8 tile_id = vma->node->ggtt->tile->id; + u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id; if (!refcount_dec_and_test(&vma->ref)) return; diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 6502b8274173..59b2ff2026d4 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -87,12 +87,8 @@ initial_plane_bo(struct xe_device *xe, base = round_down(plane_config->base, page_size); if (IS_DGFX(xe)) { - u64 __iomem *gte = tile0->mem.ggtt->gsm; - u64 pte; + u64 pte = xe_ggtt_read_pte(tile0->mem.ggtt, base); - gte += base / XE_PAGE_SIZE; - - pte = ioread64(gte); if (!(pte & XE_GGTT_PTE_DM)) { drm_err(&xe->drm, "Initial plane programming missing DM bit\n"); diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h index 5394a1373a6b..ef2bf984723f 100644 --- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h @@ -40,6 +40,7 @@ #define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0) #define PWR_LIM_VAL 
REG_GENMASK(14, 0) #define PWR_LIM_EN REG_BIT(15) +#define PWR_LIM REG_GENMASK(15, 0) #define PWR_LIM_TIME REG_GENMASK(23, 17) #define PWR_LIM_TIME_X REG_GENMASK(23, 22) #define PWR_LIM_TIME_Y REG_GENMASK(21, 17) diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h index a79ad2da070c..e693a50706f8 100644 --- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h @@ -97,4 +97,7 @@ #define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET) #define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET) +#define OAM_COMPRESSION_T3_CONTROL XE_REG(0x1c2e00) +#define OAM_LAT_MEASURE_ENABLE REG_BIT(4) + #endif diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h index c556a04670ee..fb097607b86c 100644 --- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h @@ -18,12 +18,10 @@ #define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c) #define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080) -#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120) #define BMG_FAN_1_SPEED XE_REG(0x138140) #define BMG_FAN_2_SPEED XE_REG(0x138170) #define BMG_FAN_3_SPEED XE_REG(0x1381a0) #define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0) #define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434) -#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458) #endif /* _XE_PCODE_REGS_H_ */ diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h index f45abcd96ba8..b0efd9b48d1e 100644 --- a/drivers/gpu/drm/xe/regs/xe_pmt.h +++ b/drivers/gpu/drm/xe/regs/xe_pmt.h @@ -10,6 +10,11 @@ #define BMG_PMT_BASE_OFFSET 0xDB000 #define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET) +#define PUNIT_TELEMETRY_GUID XE_REG(BMG_DISCOVERY_OFFSET + 0x4) +#define BMG_ENERGY_STATUS_PMT_OFFSET (0x30) +#define ENERGY_PKG REG_GENMASK64(31, 0) +#define ENERGY_CARD REG_GENMASK64(63, 32) + #define BMG_TELEMETRY_BASE_OFFSET 0xE0000 #define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 378dcd0fb414..77ca1ab527ec 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -514,9 +514,9 @@ static int shrink_test_run_device(struct xe_device *xe) * other way around, they may not be subject to swapping... 
*/ if (alloced < purgeable) { - xe_ttm_tt_account_subtract(&xe_tt->ttm); + xe_ttm_tt_account_subtract(xe, &xe_tt->ttm); xe_tt->purgeable = true; - xe_ttm_tt_account_add(&xe_tt->ttm); + xe_ttm_tt_account_add(xe, &xe_tt->ttm); bo->ttm.priority = 0; spin_lock(&bo->ttm.bdev->lru_lock); ttm_bo_move_to_lru_tail(&bo->ttm); diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c index 6faffcd74869..537766cdd882 100644 --- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c +++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c @@ -42,10 +42,8 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]); KUNIT_ASSERT_EQ(test, 0, - drm_mm_insert_node_in_range(&ggtt->mm, - &bo->ggtt_node[tile->id]->base, - bo->size, SZ_4K, - 0, 0, U64_MAX, 0)); + xe_ggtt_node_insert(bo->ggtt_node[tile->id], + bo->size, SZ_4K)); } return bo; @@ -67,8 +65,9 @@ static int guc_buf_test_init(struct kunit *test) ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt; guc = &xe_device_get_gt(test->priv, 0)->uc.guc; - drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE); - mutex_init(&ggtt->lock); + KUNIT_ASSERT_EQ(test, 0, + xe_ggtt_init_kunit(ggtt, DUT_GGTT_START, + DUT_GGTT_START + DUT_GGTT_SIZE)); kunit_activate_static_stub(test, xe_managed_bo_create_pin_map, replacement_xe_managed_bo_create_pin_map); diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index 1d3e2e50c355..baccb657bd05 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -12,49 +12,48 @@ #include <kunit/test-bug.h> #include <kunit/visibility.h> +static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s", + param->verx100 / 100, param->verx100 % 100, param->name); +} + +KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc); +KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc); + /** - * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs - * @xe_fn: Function to call for each device. + * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters + * @prev: the pointer to the previous parameter to iterate from or NULL + * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE * - * This function iterates over the descriptors for all graphics IPs recognized - * by the driver and calls @xe_fn: for each one of them. + * This function prepares struct xe_ip parameter. + * + * To be used only as a parameter generator function in &KUNIT_CASE_PARAM. + * + * Return: pointer to the next parameter or NULL if no more parameters */ -void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn) +const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc) { - const struct xe_graphics_desc *desc, *last = NULL; - - for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) { - desc = graphics_ips[i].desc; - if (desc == last) - continue; - - xe_fn(desc); - last = desc; - } + return graphics_ip_gen_params(prev, desc); } -EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip); +EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param); /** - * xe_call_for_each_media_ip - Iterate over all recognized media IPs - * @xe_fn: Function to call for each device. 
+ * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters + * @prev: the pointer to the previous parameter to iterate from or NULL + * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE + * + * This function prepares struct xe_ip parameter. + * + * To be used only as a parameter generator function in &KUNIT_CASE_PARAM. * - * This function iterates over the descriptors for all media IPs recognized - * by the driver and calls @xe_fn: for each one of them. + * Return: pointer to the next parameter or NULL if no more parameters */ -void xe_call_for_each_media_ip(xe_media_fn xe_fn) +const void *xe_pci_media_ip_gen_param(const void *prev, char *desc) { - const struct xe_media_desc *desc, *last = NULL; - - for (int i = 0; i < ARRAY_SIZE(media_ips); i++) { - desc = media_ips[i].desc; - if (desc == last) - continue; - - xe_fn(desc); - last = desc; - } + return media_ip_gen_params(prev, desc); } -EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip); +EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param); static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c index 744a37583d2d..95fed41f7ff2 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.c +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -14,9 +14,10 @@ #include "xe_pci_test.h" #include "xe_pci_types.h" -static void check_graphics_ip(const struct xe_graphics_desc *graphics) +static void check_graphics_ip(struct kunit *test) { - struct kunit *test = kunit_get_current_test(); + const struct xe_ip *param = test->param_value; + const struct xe_graphics_desc *graphics = param->desc; u64 mask = graphics->hw_engine_mask; /* RCS, CCS, and BCS engines are allowed on the graphics IP */ @@ -28,9 +29,10 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics) KUNIT_ASSERT_EQ(test, mask, 0); } -static void check_media_ip(const struct xe_media_desc *media) +static void check_media_ip(struct kunit *test) { - struct kunit *test = kunit_get_current_test(); + const struct xe_ip *param = test->param_value; + const struct xe_media_desc *media = param->desc; u64 mask = media->hw_engine_mask; /* VCS, VECS and GSCCS engines are allowed on the media IP */ @@ -42,19 +44,9 @@ static void check_media_ip(const struct xe_media_desc *media) KUNIT_ASSERT_EQ(test, mask, 0); } -static void xe_gmdid_graphics_ip(struct kunit *test) -{ - xe_call_for_each_graphics_ip(check_graphics_ip); -} - -static void xe_gmdid_media_ip(struct kunit *test) -{ - xe_call_for_each_media_ip(check_media_ip); -} - static struct kunit_case xe_pci_tests[] = { - KUNIT_CASE(xe_gmdid_graphics_ip), - KUNIT_CASE(xe_gmdid_media_ip), + KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param), + KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param), {} }; diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h index ede46800aff1..3a1df7a5e291 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.h +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -12,15 +12,6 @@ #include "xe_sriov_types.h" struct xe_device; -struct xe_graphics_desc; -struct xe_media_desc; - -typedef int (*xe_device_fn)(struct xe_device *); -typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *); -typedef void (*xe_media_fn)(const struct xe_media_desc *); - -void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn); -void xe_call_for_each_media_ip(xe_media_fn xe_fn); struct xe_pci_fake_data { enum xe_sriov_mode sriov_mode; @@ 
-34,6 +25,8 @@ struct xe_pci_fake_data { int xe_pci_fake_device_init(struct xe_device *xe); +const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc); +const void *xe_pci_media_ip_gen_param(const void *prev, char *desc); const void *xe_pci_live_device_gen_param(const void *prev, char *desc); #endif diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 7aa2c17825da..4e39188a021a 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -336,15 +336,13 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo, /* struct xe_ttm_tt - Subclassed ttm_tt for xe */ struct xe_ttm_tt { struct ttm_tt ttm; - /** @xe - The xe device */ - struct xe_device *xe; struct sg_table sgt; struct sg_table *sg; /** @purgeable: Whether the content of the pages of @ttm is purgeable. */ bool purgeable; }; -static int xe_tt_map_sg(struct ttm_tt *tt) +static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt) { struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); unsigned long num_pages = tt->num_pages; @@ -359,13 +357,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt) ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, num_pages, 0, (u64)num_pages << PAGE_SHIFT, - xe_sg_segment_size(xe_tt->xe->drm.dev), + xe_sg_segment_size(xe->drm.dev), GFP_KERNEL); if (ret) return ret; xe_tt->sg = &xe_tt->sgt; - ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, + ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (ret) { sg_free_table(xe_tt->sg); @@ -376,12 +374,12 @@ static int xe_tt_map_sg(struct ttm_tt *tt) return 0; } -static void xe_tt_unmap_sg(struct ttm_tt *tt) +static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt) { struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); if (xe_tt->sg) { - dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, + dma_unmap_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, 0); sg_free_table(xe_tt->sg); xe_tt->sg = NULL; @@ -400,24 +398,24 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo) * Account ttm pages against the device shrinker's shrinkable and * purgeable counts. 
*/ -static void xe_ttm_tt_account_add(struct ttm_tt *tt) +static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt) { struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); if (xe_tt->purgeable) - xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages); + xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages); else - xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0); + xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0); } -static void xe_ttm_tt_account_subtract(struct ttm_tt *tt) +static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt) { struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); if (xe_tt->purgeable) - xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages); + xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages); else - xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0); + xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0); } static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, @@ -436,7 +434,6 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, return NULL; tt = &xe_tt->ttm; - xe_tt->xe = xe; extra_pages = 0; if (xe_bo_needs_ccs_pages(bo)) @@ -527,21 +524,23 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, return err; xe_tt->purgeable = false; - xe_ttm_tt_account_add(tt); + xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt); return 0; } static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) { + struct xe_device *xe = ttm_to_xe_device(ttm_dev); + if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) return; - xe_tt_unmap_sg(tt); + xe_tt_unmap_sg(xe, tt); ttm_pool_free(&ttm_dev->pool, tt); - xe_ttm_tt_account_subtract(tt); + xe_ttm_tt_account_subtract(xe, tt); } static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) @@ -789,7 +788,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, /* Bo creation path, moving to system or TT. */ if ((!old_mem && ttm) && !handle_system_ccs) { if (new_mem->mem_type == XE_PL_TT) - ret = xe_tt_map_sg(ttm); + ret = xe_tt_map_sg(xe, ttm); if (!ret) ttm_bo_move_null(ttm_bo, new_mem); goto out; @@ -812,7 +811,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, (!ttm && ttm_bo->type == ttm_bo_type_device); if (new_mem->mem_type == XE_PL_TT) { - ret = xe_tt_map_sg(ttm); + ret = xe_tt_map_sg(xe, ttm); if (ret) goto out; } @@ -958,7 +957,7 @@ out: if (timeout < 0) ret = timeout; - xe_tt_unmap_sg(ttm_bo->ttm); + xe_tt_unmap_sg(xe, ttm_bo->ttm); } return ret; @@ -968,6 +967,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, unsigned long *scanned) { + struct xe_device *xe = ttm_to_xe_device(bo->bdev); long lret; /* Fake move to system, without copying data. 
*/ @@ -982,7 +982,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, if (lret) return lret; - xe_tt_unmap_sg(bo->ttm); + xe_tt_unmap_sg(xe, bo->ttm); ttm_bo_move_null(bo, new_resource); } @@ -993,7 +993,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, .allow_move = false}); if (lret > 0) - xe_ttm_tt_account_subtract(bo->ttm); + xe_ttm_tt_account_subtract(xe, bo->ttm); return lret; } @@ -1043,7 +1043,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); struct ttm_place place = {.mem_type = bo->resource->mem_type}; struct xe_bo *xe_bo = ttm_to_xe_bo(bo); - struct xe_device *xe = xe_tt->xe; + struct xe_device *xe = ttm_to_xe_device(bo->bdev); bool needs_rpm; long lret = 0L; @@ -1080,7 +1080,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, xe_pm_runtime_put(xe); if (lret > 0) - xe_ttm_tt_account_subtract(tt); + xe_ttm_tt_account_subtract(xe, tt); out_unref: xe_bo_put(xe_bo); @@ -1381,7 +1381,8 @@ int xe_bo_dma_unmap_pinned(struct xe_bo *bo) ttm_bo->sg = NULL; xe_tt->sg = NULL; } else if (xe_tt->sg) { - dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, + dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev, + xe_tt->sg, DMA_BIDIRECTIONAL, 0); sg_free_table(xe_tt->sg); xe_tt->sg = NULL; @@ -2293,7 +2294,7 @@ int xe_bo_pin_external(struct xe_bo *bo) ttm_bo_pin(&bo->ttm); if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) - xe_ttm_tt_account_subtract(bo->ttm.ttm); + xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -2341,7 +2342,7 @@ int xe_bo_pin(struct xe_bo *bo) ttm_bo_pin(&bo->ttm); if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) - xe_ttm_tt_account_subtract(bo->ttm.ttm); + xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -2377,7 +2378,7 @@ void xe_bo_unpin_external(struct xe_bo *bo) ttm_bo_unpin(&bo->ttm); if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) - xe_ttm_tt_account_add(bo->ttm.ttm); + xe_ttm_tt_account_add(xe, bo->ttm.ttm); /* * FIXME: If we always use the reserve / unreserve functions for locking @@ -2409,7 +2410,7 @@ void xe_bo_unpin(struct xe_bo *bo) } ttm_bo_unpin(&bo->ttm); if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) - xe_ttm_tt_account_add(bo->ttm.ttm); + xe_ttm_tt_account_add(xe, bo->ttm.ttm); } /** @@ -2992,6 +2993,14 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo) if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) return false; + /* + * Compression implies coh_none, therefore we know for sure that WB + * memory can't currently use compression, which is likely one of the + * common cases. 
+ */ + if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB) + return false; + return true; } @@ -3067,7 +3076,7 @@ void xe_bo_put(struct xe_bo *bo) #endif for_each_tile(tile, xe_bo_device(bo), id) if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) - might_lock(&bo->ggtt_node[id]->ggtt->lock); + xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt); drm_gem_object_put(&bo->ttm.base); } } diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index ed3746d32b27..7484ce55a303 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -197,9 +197,7 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo) if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) continue; - mutex_lock(&tile->mem.ggtt->lock); - xe_ggtt_map_bo(tile->mem.ggtt, bo); - mutex_unlock(&tile->mem.ggtt->lock); + xe_ggtt_map_bo_unlocked(tile->mem.ggtt, bo); } } diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index cb9f175c89a1..8ec1ff1e4e80 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -3,14 +3,19 @@ * Copyright © 2025 Intel Corporation */ +#include <linux/bitops.h> #include <linux/configfs.h> +#include <linux/find.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/string.h> #include "xe_configfs.h" #include "xe_module.h" +#include "xe_hw_engine_types.h" + /** * DOC: Xe Configfs * @@ -48,6 +53,30 @@ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported) * + * Allowed engines: + * ---------------- + * + * Allow only a set of engine(s) to be available, disabling the other engines + * even if they are available in hardware. This is applied after HW fuses are + * considered on each tile. Examples: + * + * Allow only one render and one copy engines, nothing else:: + * + * # echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed + * + * Allow only compute engines and first copy engine:: + * + * # echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed + * + * Note that the engine names are the per-GT hardware names. On multi-tile + * platforms, writing ``rcs0,bcs0`` to this file would allow the first render + * and copy engines on each tile. + * + * The requested configuration may not be supported by the platform and driver + * may fail to probe. For example: if at least one copy engine is expected to be + * available for migrations, but it's disabled. This is intended for debugging + * purposes only. 
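As a minimal driver-side sketch of how the allow-list composes with the HW fuses (the wrapper name below is hypothetical; only xe_configfs_get_engines_allowed() comes from this patch):

/*
 * Minimal sketch (not from this patch): apply the configfs allow-list on
 * top of the fuse-derived engine mask during probe. apply_engines_allowed()
 * is a hypothetical name; xe_configfs_get_engines_allowed() is the real
 * helper, returning U64_MAX when no configfs group exists for the device.
 */
static u64 apply_engines_allowed(struct pci_dev *pdev, u64 fused_mask)
{
	return fused_mask & xe_configfs_get_engines_allowed(pdev);
}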
+ * * Remove devices * ============== * @@ -60,11 +89,30 @@ struct xe_config_device { struct config_group group; bool survivability_mode; + u64 engines_allowed; /* protects attributes */ struct mutex lock; }; +struct engine_info { + const char *cls; + u64 mask; +}; + +/* Some helpful macros to aid on the sizing of buffer allocation when parsing */ +#define MAX_ENGINE_CLASS_CHARS 5 +#define MAX_ENGINE_INSTANCE_CHARS 2 + +static const struct engine_info engine_info[] = { + { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK }, + { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK }, + { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK }, + { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK }, + { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK }, + { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK }, +}; + static struct xe_config_device *to_xe_config_device(struct config_item *item) { return container_of(to_config_group(item), struct xe_config_device, group); @@ -94,10 +142,96 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa return len; } +static ssize_t engines_allowed_show(struct config_item *item, char *page) +{ + struct xe_config_device *dev = to_xe_config_device(item); + char *p = page; + + for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) { + u64 mask = engine_info[i].mask; + + if ((dev->engines_allowed & mask) == mask) { + p += sprintf(p, "%s*\n", engine_info[i].cls); + } else if (mask & dev->engines_allowed) { + u16 bit0 = __ffs64(mask), bit; + + mask &= dev->engines_allowed; + + for_each_set_bit(bit, (const unsigned long *)&mask, 64) + p += sprintf(p, "%s%u\n", engine_info[i].cls, + bit - bit0); + } + } + + return p - page; +} + +static bool lookup_engine_mask(const char *pattern, u64 *mask) +{ + for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) { + u8 instance; + u16 bit; + + if (!str_has_prefix(pattern, engine_info[i].cls)) + continue; + + pattern += strlen(engine_info[i].cls); + + if (!strcmp(pattern, "*")) { + *mask = engine_info[i].mask; + return true; + } + + if (kstrtou8(pattern, 10, &instance)) + return false; + + bit = __ffs64(engine_info[i].mask) + instance; + if (bit >= fls64(engine_info[i].mask)) + return false; + + *mask = BIT_ULL(bit); + return true; + } + + return false; +} + +static ssize_t engines_allowed_store(struct config_item *item, const char *page, + size_t len) +{ + struct xe_config_device *dev = to_xe_config_device(item); + size_t patternlen, p; + u64 mask, val = 0; + + for (p = 0; p < len; p += patternlen + 1) { + char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1]; + + patternlen = strcspn(page + p, ",\n"); + if (patternlen >= sizeof(buf)) + return -EINVAL; + + memcpy(buf, page + p, patternlen); + buf[patternlen] = '\0'; + + if (!lookup_engine_mask(buf, &mask)) + return -EINVAL; + + val |= mask; + } + + mutex_lock(&dev->lock); + dev->engines_allowed = val; + mutex_unlock(&dev->lock); + + return len; +} + CONFIGFS_ATTR(, survivability_mode); +CONFIGFS_ATTR(, engines_allowed); static struct configfs_attribute *xe_config_device_attrs[] = { &attr_survivability_mode, + &attr_engines_allowed, NULL, }; @@ -139,6 +273,9 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro if (!dev) return ERR_PTR(-ENOMEM); + /* Default values */ + dev->engines_allowed = U64_MAX; + config_group_init_type_name(&dev->group, name, &xe_config_device_type); mutex_init(&dev->lock); @@ -226,6 +363,29 @@ void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) config_item_put(&dev->group.cg_item); } +/** + * 
xe_configfs_get_engines_allowed - get engine allowed mask from configfs + * @pdev: pci device + * + * Find the configfs group that belongs to the pci device and return + * the mask of engines allowed to be used. + * + * Return: engine mask with allowed engines + */ +u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) +{ + struct xe_config_device *dev = configfs_find_group(pdev); + u64 engines_allowed; + + if (!dev) + return U64_MAX; + + engines_allowed = dev->engines_allowed; + config_item_put(&dev->group.cg_item); + + return engines_allowed; +} + int __init xe_configfs_init(void) { struct config_group *root = &xe_configfs.su_group; diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h index d7d041ec2611..fb8764008089 100644 --- a/drivers/gpu/drm/xe/xe_configfs.h +++ b/drivers/gpu/drm/xe/xe_configfs.h @@ -5,6 +5,7 @@ #ifndef _XE_CONFIGFS_H_ #define _XE_CONFIGFS_H_ +#include <linux/limits.h> #include <linux/types.h> struct pci_dev; @@ -14,11 +15,13 @@ int xe_configfs_init(void); void xe_configfs_exit(void); bool xe_configfs_get_survivability_mode(struct pci_dev *pdev); void xe_configfs_clear_survivability_mode(struct pci_dev *pdev); +u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev); #else -static inline int xe_configfs_init(void) { return 0; }; -static inline void xe_configfs_exit(void) {}; -static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }; -static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {}; +static inline int xe_configfs_init(void) { return 0; } +static inline void xe_configfs_exit(void) { } +static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; } +static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { } +static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; } #endif #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index d0503959a8ed..d83cd6ed3fa8 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -191,6 +191,41 @@ static const struct file_operations wedged_mode_fops = { .write = wedged_mode_set, }; +static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf, + size_t size, loff_t *pos) +{ + struct xe_device *xe = file_inode(f)->i_private; + char buf[32]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), "%d\n", xe->atomic_svm_timeslice_ms); + + return simple_read_from_buffer(ubuf, size, pos, buf, len); +} + +static ssize_t atomic_svm_timeslice_ms_set(struct file *f, + const char __user *ubuf, + size_t size, loff_t *pos) +{ + struct xe_device *xe = file_inode(f)->i_private; + u32 atomic_svm_timeslice_ms; + ssize_t ret; + + ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms); + if (ret) + return ret; + + xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms; + + return size; +} + +static const struct file_operations atomic_svm_timeslice_ms_fops = { + .owner = THIS_MODULE, + .read = atomic_svm_timeslice_ms_show, + .write = atomic_svm_timeslice_ms_set, +}; + void xe_debugfs_register(struct xe_device *xe) { struct ttm_device *bdev = &xe->ttm; @@ -211,6 +246,9 @@ void xe_debugfs_register(struct xe_device *xe) debugfs_create_file("wedged_mode", 0600, root, xe, &wedged_mode_fops); + debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe, + &atomic_svm_timeslice_ms_fops); + for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) { man = ttm_manager_type(bdev, 
mem_type); diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index c02c4c4e9412..7d9a31868ea9 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -402,9 +402,6 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy) if (xe->unordered_wq) destroy_workqueue(xe->unordered_wq); - if (!IS_ERR_OR_NULL(xe->mem.shrinker)) - xe_shrinker_destroy(xe->mem.shrinker); - if (xe->destroy_wq) destroy_workqueue(xe->destroy_wq); @@ -438,13 +435,14 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, if (err) goto err; - xe->mem.shrinker = xe_shrinker_create(xe); - if (IS_ERR(xe->mem.shrinker)) - return ERR_CAST(xe->mem.shrinker); + err = xe_shrinker_create(xe); + if (err) + goto err; xe->info.devid = pdev->device; xe->info.revid = pdev->revision; xe->info.force_execlist = xe_modparam.force_execlist; + xe->atomic_svm_timeslice_ms = 5; err = xe_irq_init(xe); if (err) @@ -804,18 +802,19 @@ int xe_device_probe(struct xe_device *xe) * be performed. */ xe_gt_mmio_init(gt); - } - for_each_tile(tile, xe, id) { if (IS_SRIOV_VF(xe)) { - xe_guc_comm_init_early(&tile->primary_gt->uc.guc); - err = xe_gt_sriov_vf_bootstrap(tile->primary_gt); + xe_guc_comm_init_early(>->uc.guc); + err = xe_gt_sriov_vf_bootstrap(gt); if (err) return err; - err = xe_gt_sriov_vf_query_config(tile->primary_gt); + err = xe_gt_sriov_vf_query_config(gt); if (err) return err; } + } + + for_each_tile(tile, xe, id) { err = xe_ggtt_init_early(tile->mem.ggtt); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 0bc3bc8e6803..e4da797a984b 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -195,6 +195,8 @@ void xe_device_declare_wedged(struct xe_device *xe); struct xe_file *xe_file_get(struct xe_file *xef); void xe_file_put(struct xe_file *xef); +int xe_is_injection_active(void); + /* * Occasionally it is seen that the G2H worker starts running after a delay of more than * a second even after being queued and activated by the Linux workqueue subsystem. 
This diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 6383a1c0d478..a504b8ea6f3f 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -502,6 +502,10 @@ struct xe_device { const struct xe_pat_table_entry *table; /** @pat.n_entries: Number of PAT entries */ int n_entries; + /** @pat.ats_entry: PAT entry for PCIe ATS responses */ + const struct xe_pat_table_entry *pat_ats; + /** @pat.pta_entry: PAT entry for page table accesses */ + const struct xe_pat_table_entry *pat_pta; u32 idx[__XE_CACHE_LEVEL_COUNT]; } pat; @@ -576,6 +580,9 @@ struct xe_device { /** @pmu: performance monitoring unit */ struct xe_pmu pmu; + /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */ + u32 atomic_svm_timeslice_ms; + #ifdef TEST_VM_OPS_ERROR /** * @vm_inject_error_position: inject errors at different places in VM diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h index d61650d4aa0b..95242a375e54 100644 --- a/drivers/gpu/drm/xe/xe_drv.h +++ b/drivers/gpu/drm/xe/xe_drv.h @@ -9,7 +9,7 @@ #include <drm/drm_drv.h> #define DRIVER_NAME "xe" -#define DRIVER_DESC "Intel Xe Graphics" +#define DRIVER_DESC "Intel Xe2 Graphics" /* Interface history: * diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 7062115909f2..7b11fa1356f0 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -5,6 +5,7 @@ #include "xe_ggtt.h" +#include <kunit/visibility.h> #include <linux/fault-inject.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/sizes.h> @@ -22,12 +23,13 @@ #include "xe_device.h" #include "xe_gt.h" #include "xe_gt_printk.h" -#include "xe_gt_sriov_vf.h" #include "xe_gt_tlb_invalidation.h" #include "xe_map.h" #include "xe_mmio.h" #include "xe_pm.h" +#include "xe_res_cursor.h" #include "xe_sriov.h" +#include "xe_tile_sriov_vf.h" #include "xe_wa.h" #include "xe_wopcm.h" @@ -64,13 +66,9 @@ * give us the correct placement for free. */ -static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, - u16 pat_index) +static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index) { - u64 pte; - - pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); - pte |= XE_PAGE_PRESENT; + u64 pte = XE_PAGE_PRESENT; if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) pte |= XE_GGTT_PTE_DM; @@ -78,13 +76,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, return pte; } -static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, - u16 pat_index) +static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index) { struct xe_device *xe = xe_bo_device(bo); u64 pte; - pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index); + pte = xelp_ggtt_pte_flags(bo, pat_index); xe_assert(xe, pat_index <= 3); @@ -149,8 +146,9 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) xe_tile_assert(ggtt->tile, start < end); if (ggtt->scratch) - scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, - pat_index); + scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) | + ggtt->pt_ops->pte_encode_flags(ggtt->scratch, + pat_index); else scratch_pte = 0; @@ -160,6 +158,22 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) } } +/** + * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile + * @tile: &xe_tile + * + * Allocates a &xe_ggtt for a given tile. + * + * Return: &xe_ggtt on success, or NULL when out of memory. 
+ */ +struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile) +{ + struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL); + if (ggtt) + ggtt->tile = tile; + return ggtt; +} + static void ggtt_fini_early(struct drm_device *drm, void *arg) { struct xe_ggtt *ggtt = arg; @@ -176,6 +190,13 @@ static void ggtt_fini(void *arg) ggtt->scratch = NULL; } +#ifdef CONFIG_LOCKDEP +void xe_ggtt_might_lock(struct xe_ggtt *ggtt) +{ + might_lock(&ggtt->lock); +} +#endif + static void primelockdep(struct xe_ggtt *ggtt) { if (!IS_ENABLED(CONFIG_LOCKDEP)) @@ -187,20 +208,36 @@ static void primelockdep(struct xe_ggtt *ggtt) } static const struct xe_ggtt_pt_ops xelp_pt_ops = { - .pte_encode_bo = xelp_ggtt_pte_encode_bo, + .pte_encode_flags = xelp_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_ops = { - .pte_encode_bo = xelpg_ggtt_pte_encode_bo, + .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte, }; static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = { - .pte_encode_bo = xelpg_ggtt_pte_encode_bo, + .pte_encode_flags = xelpg_ggtt_pte_flags, .ggtt_set_pte = xe_ggtt_set_pte_and_flush, }; +static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved) +{ + drm_mm_init(&ggtt->mm, reserved, + ggtt->size - reserved); + mutex_init(&ggtt->lock); + primelockdep(ggtt); +} + +int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size) +{ + ggtt->size = size; + __xe_ggtt_init_early(ggtt, reserved); + return 0; +} +EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit); + /** * xe_ggtt_init_early - Early GGTT initialization * @ggtt: the &xe_ggtt to be initialized @@ -219,7 +256,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) unsigned int gsm_size; int err; - if (IS_SRIOV_VF(xe)) + if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250) gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */ else gsm_size = probe_gsm_size(pdev); @@ -247,18 +284,14 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) ggtt->pt_ops = &xelp_pt_ops; ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); - - drm_mm_init(&ggtt->mm, xe_wopcm_size(xe), - ggtt->size - xe_wopcm_size(xe)); - mutex_init(&ggtt->lock); - primelockdep(ggtt); + __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe)); err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt); if (err) return err; if (IS_SRIOV_VF(xe)) { - err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0)); + err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile); if (err) return err; } @@ -429,16 +462,17 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt, } /** - * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses + * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses * @node: the &xe_ggtt_node to hold reserved GGTT node * @start: the starting GGTT address of the reserved region * @end: then end GGTT address of the reserved region * - * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node. + * To be used in cases where ggtt->lock is already taken. + * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node. * * Return: 0 on success or a negative error code on failure. 
*/ -int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end) +int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end) { struct xe_ggtt *ggtt = node->ggtt; int err; @@ -447,14 +481,13 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end) xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE)); xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE)); xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base)); + lockdep_assert_held(&ggtt->lock); node->base.color = 0; node->base.start = start; node->base.size = end - start; - mutex_lock(&ggtt->lock); err = drm_mm_reserve_node(&ggtt->mm, &node->base); - mutex_unlock(&ggtt->lock); if (xe_gt_WARN(ggtt->tile->primary_gt, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n", @@ -466,27 +499,72 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end) } /** - * xe_ggtt_node_remove_balloon - release a reserved GGTT region + * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region * @node: the &xe_ggtt_node with reserved GGTT region * - * See xe_ggtt_node_insert_balloon() for details. + * To be used in cases where ggtt->lock is already taken. + * See xe_ggtt_node_insert_balloon_locked() for details. */ -void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node) +void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node) { - if (!node || !node->ggtt) + if (!xe_ggtt_node_allocated(node)) return; - if (!drm_mm_node_allocated(&node->base)) - goto free_node; + lockdep_assert_held(&node->ggtt->lock); xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon"); - mutex_lock(&node->ggtt->lock); drm_mm_remove_node(&node->base); - mutex_unlock(&node->ggtt->lock); +} -free_node: - xe_ggtt_node_fini(node); +static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size) +{ + struct xe_tile *tile = ggtt->tile; + struct xe_device *xe = tile_to_xe(tile); + u64 __maybe_unused wopcm = xe_wopcm_size(xe); + + xe_tile_assert(tile, start >= wopcm); + xe_tile_assert(tile, start + size < ggtt->size - wopcm); +} + +/** + * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range. + * @ggtt: the &xe_ggtt struct instance + * @shift: change to the location of area provisioned for current VF + * + * This function moves all nodes from the GGTT VM, to a temp list. These nodes are expected + * to represent allocations in range formerly assigned to current VF, before the range changed. + * When the GGTT VM is completely clear of any nodes, they are re-added with shifted offsets. + * + * The function has no ability of failing - because it shifts existing nodes, without + * any additional processing. If the nodes were successfully existing at the old address, + * they will do the same at the new one. A fail inside this function would indicate that + * the list of nodes was either already damaged, or that the shift brings the address range + * outside of valid bounds. Both cases justify an assert rather than error code. 
+ */ +void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift) +{ + struct xe_tile *tile __maybe_unused = ggtt->tile; + struct drm_mm_node *node, *tmpn; + LIST_HEAD(temp_list_head); + + lockdep_assert_held(&ggtt->lock); + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) + drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) + xe_ggtt_assert_fit(ggtt, node->start + shift, node->size); + + drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) { + drm_mm_remove_node(node); + list_add(&node->node_list, &temp_list_head); + } + + list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) { + list_del(&node->node_list); + node->start += shift; + drm_mm_reserve_node(&ggtt->mm, node); + xe_tile_assert(tile, drm_mm_node_allocated(node)); + } } /** @@ -537,12 +615,12 @@ int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align) * xe_ggtt_node_init - Initialize %xe_ggtt_node struct * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved. * - * This function will allocated the struct %xe_ggtt_node and return it's pointer. + * This function will allocate the struct %xe_ggtt_node and return its pointer. * This struct will then be freed after the node removal upon xe_ggtt_node_remove() - * or xe_ggtt_node_remove_balloon(). + * or xe_ggtt_node_remove_balloon_locked(). * Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated * in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), - * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in GGTT. + * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT. * * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise. **/ @@ -564,7 +642,7 @@ struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt) * @node: the &xe_ggtt_node to be freed * * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), - * or xe_ggtt_node_insert_balloon(); and this @node is not going to be reused, then, + * or xe_ggtt_node_insert_balloon_locked(); and this @node is not going to be reused, then, * this function needs to be called to free the %xe_ggtt_node struct **/ void xe_ggtt_node_fini(struct xe_ggtt_node *node) @@ -589,26 +667,59 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node) /** * xe_ggtt_map_bo - Map the BO into GGTT * @ggtt: the &xe_ggtt where node will be mapped + * @node: the &xe_ggtt_node where this BO is mapped * @bo: the &xe_bo to be mapped + * @pat_index: Which pat_index to use. */ -void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) +void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, + struct xe_bo *bo, u16 pat_index) { - u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? 
XE_CACHE_NONE : XE_CACHE_WB; - u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode]; - u64 start; - u64 offset, pte; - if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id])) + u64 start, pte, end; + struct xe_res_cursor cur; + + if (XE_WARN_ON(!node)) return; - start = bo->ggtt_node[ggtt->tile->id]->base.start; + start = node->base.start; + end = start + bo->size; + + pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index); + if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { + xe_assert(xe_bo_device(bo), bo->ttm.ttm); + + for (xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &cur); + cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) + ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, + pte | xe_res_dma(&cur)); + } else { + /* Prepend GPU offset */ + pte |= vram_region_gpu_offset(bo->ttm.resource); - for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) { - pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index); - ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte); + for (xe_res_first(bo->ttm.resource, 0, bo->size, &cur); + cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) + ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, + pte + cur.start); } } +/** + * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT + * @ggtt: the &xe_ggtt where node will be mapped + * @bo: the &xe_bo to be mapped + * + * This is used to restore a GGTT mapping after suspend. + */ +void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo) +{ + u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB; + u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode]; + + mutex_lock(&ggtt->lock); + xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index); + mutex_unlock(&ggtt->lock); +} + static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { @@ -645,7 +756,10 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, xe_ggtt_node_fini(bo->ggtt_node[tile_id]); bo->ggtt_node[tile_id] = NULL; } else { - xe_ggtt_map_bo(ggtt, bo); + u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB; + u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode]; + + xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index); } mutex_unlock(&ggtt->lock); @@ -841,3 +955,30 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer return total; } + +/** + * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO + * @ggtt: &xe_ggtt + * @bo: &xe_bo + * @pat_index: The pat_index for the PTE. + * + * This function returns the pte_flags for a given BO, without address. + * It's used for DPT to fill a GGTT mapped BO with a linear lookup table. + */ +u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, + struct xe_bo *bo, u16 pat_index) +{ + return ggtt->pt_ops->pte_encode_flags(bo, pat_index); +} + +/** + * xe_ggtt_read_pte - Read a PTE from the GGTT + * @ggtt: &xe_ggtt + * @offset: the offset for which the mapping should be read. + * + * Used by testcases, and by display reading out an inherited bios FB. 
+ */ +u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset) +{ + return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE)); +} diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h index 27e7d67de004..fbe1e397d05d 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.h +++ b/drivers/gpu/drm/xe/xe_ggtt.h @@ -9,22 +9,28 @@ #include "xe_ggtt_types.h" struct drm_printer; +struct xe_tile; +struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile); int xe_ggtt_init_early(struct xe_ggtt *ggtt); +int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size); int xe_ggtt_init(struct xe_ggtt *ggtt); struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt); void xe_ggtt_node_fini(struct xe_ggtt_node *node); -int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, - u64 start, u64 size); -void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node); +int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, + u64 start, u64 size); +void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node); +void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift); int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align); int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node, u32 size, u32 align, u32 mm_flags); void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate); bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node); -void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); +void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, + struct xe_bo *bo, u16 pat_index); +void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo); int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end); @@ -38,4 +44,14 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid); #endif +#ifndef CONFIG_LOCKDEP +static inline void xe_ggtt_might_lock(struct xe_ggtt *ggtt) +{ } +#else +void xe_ggtt_might_lock(struct xe_ggtt *ggtt); +#endif + +u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, struct xe_bo *bo, u16 pat_index); +u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset); + #endif diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h index cb02b7994a9a..c5e999d58ff2 100644 --- a/drivers/gpu/drm/xe/xe_ggtt_types.h +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h @@ -74,8 +74,8 @@ struct xe_ggtt_node { * Which can vary from platform to platform. 
*/ struct xe_ggtt_pt_ops { - /** @pte_encode_bo: Encode PTE address for a given BO */ - u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index); + /** @pte_encode_flags: Encode PTE flags for a given BO */ + u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index); /** @ggtt_set_pte: Directly write into GGTT's PTE */ void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte); }; diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 0e5d243c9451..9752a38c0162 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -118,7 +118,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); } - xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); + xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF); xe_force_wake_put(gt_to_fw(gt), fw_ref); } @@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; + xe_mocs_init_early(gt); + return 0; } @@ -634,8 +636,6 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; - xe_mocs_init_early(gt); - err = xe_gt_sysfs_init(gt); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 119a55bb7580..848618acdca8 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -122,24 +122,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p) return ret; } -static int force_reset(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_gt_reset_async(gt); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p) -{ - xe_pm_runtime_get(gt_to_xe(gt)); - xe_gt_reset(gt); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - static int sa_info(struct xe_gt *gt, struct drm_printer *p) { struct xe_tile *tile = gt_to_tile(gt); @@ -306,8 +288,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p) * - without access to the PF specific data */ static const struct drm_info_list vf_safe_debugfs_list[] = { - {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset}, - {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync}, {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info}, {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, @@ -332,6 +312,78 @@ static const struct drm_info_list pf_only_debugfs_list[] = { {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, }; +static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos, + void (*call)(struct xe_gt *), struct xe_gt *gt) +{ + bool yes; + int ret; + + if (*ppos) + return -EINVAL; + ret = kstrtobool_from_user(userbuf, count, &yes); + if (ret < 0) + return ret; + if (yes) + call(gt); + return count; +} + +static void force_reset(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_pm_runtime_get(xe); + xe_gt_reset_async(gt); + xe_pm_runtime_put(xe); +} + +static ssize_t force_reset_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct xe_gt *gt = s->private; + + return write_to_gt_call(userbuf, count, ppos, force_reset, gt); +} + +static int force_reset_show(struct seq_file *s, void *unused) +{ + struct xe_gt *gt = s->private; + + force_reset(gt); /* to be deprecated! 
*/ + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(force_reset); + +static void force_reset_sync(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_pm_runtime_get(xe); + xe_gt_reset(gt); + xe_pm_runtime_put(xe); +} + +static ssize_t force_reset_sync_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct xe_gt *gt = s->private; + + return write_to_gt_call(userbuf, count, ppos, force_reset_sync, gt); +} + +static int force_reset_sync_show(struct seq_file *s, void *unused) +{ + struct xe_gt *gt = s->private; + + force_reset_sync(gt); /* to be deprecated! */ + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(force_reset_sync); + void xe_gt_debugfs_register(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); @@ -355,6 +407,10 @@ void xe_gt_debugfs_register(struct xe_gt *gt) */ root->d_inode->i_private = gt; + /* VF safe */ + debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops); + debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops); + drm_debugfs_create_files(vf_safe_debugfs_list, ARRAY_SIZE(vf_safe_debugfs_list), root, minor); diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 10622ca471a2..3522865c67c9 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -14,6 +14,7 @@ #include "abi/guc_actions_abi.h" #include "xe_bo.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_gt_stats.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" @@ -68,31 +69,8 @@ static bool access_is_atomic(enum access_type access_type) static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma) { - return BIT(tile->id) & vma->tile_present && - !(BIT(tile->id) & vma->tile_invalidated); -} - -static bool vma_matches(struct xe_vma *vma, u64 page_addr) -{ - if (page_addr > xe_vma_end(vma) - 1 || - page_addr + SZ_4K - 1 < xe_vma_start(vma)) - return false; - - return true; -} - -static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr) -{ - struct xe_vma *vma = NULL; - - if (vm->usm.last_fault_vma) { /* Fast lookup */ - if (vma_matches(vm->usm.last_fault_vma, page_addr)) - vma = vm->usm.last_fault_vma; - } - if (!vma) - vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K); - - return vma; + return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present, + vma->tile_invalidated); } static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma, @@ -143,7 +121,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma, trace_xe_vma_pagefault(vma); - /* Check if VMA is valid */ + /* Check if VMA is valid, opportunistic check only */ if (vma_is_valid(tile, vma) && !atomic) return 0; @@ -180,7 +158,6 @@ retry_userptr: dma_fence_wait(fence, false); dma_fence_put(fence); - vma->tile_invalidated &= ~BIT(tile->id); unlock_dma_resv: drm_exec_fini(&exec); @@ -231,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) goto unlock_vm; } - vma = lookup_vma(vm, pf->page_addr); + vma = xe_vm_find_vma_by_addr(vm, pf->page_addr); if (!vma) { err = -EINVAL; goto unlock_vm; @@ -266,22 +243,22 @@ static int send_pagefault_reply(struct xe_guc *guc, return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); } -static void print_pagefault(struct xe_device *xe, struct pagefault *pf) +static void print_pagefault(struct xe_gt *gt, struct pagefault *pf) { - drm_dbg(&xe->drm, "\n\tASID: %d\n" - "\tVFID: %d\n" - "\tPDATA: 0x%04x\n" - "\tFaulted Address: 
0x%08x%08x\n" - "\tFaultType: %d\n" - "\tAccessType: %d\n" - "\tFaultLevel: %d\n" - "\tEngineClass: %d %s\n" - "\tEngineInstance: %d\n", - pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr), - lower_32_bits(pf->page_addr), - pf->fault_type, pf->access_type, pf->fault_level, - pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class), - pf->engine_instance); + xe_gt_dbg(gt, "\n\tASID: %d\n" + "\tVFID: %d\n" + "\tPDATA: 0x%04x\n" + "\tFaulted Address: 0x%08x%08x\n" + "\tFaultType: %d\n" + "\tAccessType: %d\n" + "\tFaultLevel: %d\n" + "\tEngineClass: %d %s\n" + "\tEngineInstance: %d\n", + pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr), + lower_32_bits(pf->page_addr), + pf->fault_type, pf->access_type, pf->fault_level, + pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class), + pf->engine_instance); } #define PF_MSG_LEN_DW 4 @@ -333,7 +310,6 @@ static bool pf_queue_full(struct pf_queue *pf_queue) int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) { struct xe_gt *gt = guc_to_gt(guc); - struct xe_device *xe = gt_to_xe(gt); struct pf_queue *pf_queue; unsigned long flags; u32 asid; @@ -358,7 +334,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) pf_queue->num_dw; queue_work(gt->usm.pf_wq, &pf_queue->worker); } else { - drm_warn(&xe->drm, "PF Queue full, shouldn't be possible"); + xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n"); } spin_unlock_irqrestore(&pf_queue->lock, flags); @@ -371,7 +347,6 @@ static void pf_queue_work_func(struct work_struct *w) { struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker); struct xe_gt *gt = pf_queue->gt; - struct xe_device *xe = gt_to_xe(gt); struct xe_guc_pagefault_reply reply = {}; struct pagefault pf = {}; unsigned long threshold; @@ -382,9 +357,9 @@ static void pf_queue_work_func(struct work_struct *w) while (get_pagefault(pf_queue, &pf)) { ret = handle_pagefault(gt, &pf); if (unlikely(ret)) { - print_pagefault(xe, &pf); + print_pagefault(gt, &pf); pf.fault_unsuccessful = 1; - drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret); + xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret)); } reply.dw0 = FIELD_PREP(PFR_VALID, 1) | @@ -537,21 +512,21 @@ static int sub_granularity_in_byte(int val) return (granularity_in_byte(val) / 32); } -static void print_acc(struct xe_device *xe, struct acc *acc) +static void print_acc(struct xe_gt *gt, struct acc *acc) { - drm_warn(&xe->drm, "Access counter request:\n" - "\tType: %s\n" - "\tASID: %d\n" - "\tVFID: %d\n" - "\tEngine: %d:%d\n" - "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n" - "\tSub_Granularity Vector: 0x%08x\n" - "\tVA Range base: 0x%016llx\n", - acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL", - acc->asid, acc->vfid, acc->engine_class, acc->engine_instance, - granularity_in_byte(acc->granularity) / SZ_1K, - sub_granularity_in_byte(acc->granularity) / SZ_1K, - acc->sub_granularity, acc->va_range_base); + xe_gt_warn(gt, "Access counter request:\n" + "\tType: %s\n" + "\tASID: %d\n" + "\tVFID: %d\n" + "\tEngine: %d:%d\n" + "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n" + "\tSub_Granularity Vector: 0x%08x\n" + "\tVA Range base: 0x%016llx\n", + acc->access_type ? 
"AC_NTFY_VAL" : "AC_TRIG_VAL", + acc->asid, acc->vfid, acc->engine_class, acc->engine_instance, + granularity_in_byte(acc->granularity) / SZ_1K, + sub_granularity_in_byte(acc->granularity) / SZ_1K, + acc->sub_granularity, acc->va_range_base); } static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc) @@ -649,7 +624,6 @@ static void acc_queue_work_func(struct work_struct *w) { struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker); struct xe_gt *gt = acc_queue->gt; - struct xe_device *xe = gt_to_xe(gt); struct acc acc = {}; unsigned long threshold; int ret; @@ -659,8 +633,8 @@ static void acc_queue_work_func(struct work_struct *w) while (get_acc(acc_queue, &acc)) { ret = handle_acc(gt, &acc); if (unlikely(ret)) { - print_acc(xe, &acc); - drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret); + print_acc(gt, &acc); + xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret)); } if (time_after(jiffies, threshold) && @@ -705,7 +679,7 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len) acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW; queue_work(gt->usm.acc_wq, &acc_queue->worker); } else { - drm_warn(>_to_xe(gt)->drm, "ACC Queue full, dropping ACC"); + xe_gt_warn(gt, "ACC Queue full, dropping ACC\n"); } spin_unlock(&acc_queue->lock); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 2420a548cacc..3556c41c041b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1520,6 +1520,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size { int err; + xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt))); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); if (vfid) err = pf_provision_vf_lmem(gt, vfid, size); @@ -1629,7 +1631,7 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, num_vfs); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); - if (!IS_DGFX(gt_to_xe(gt))) + if (!xe_device_has_lmtt(gt_to_xe(gt))) return 0; mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); @@ -2163,7 +2165,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) valid_all = valid_all && valid_ggtt; valid_any = valid_any || (valid_ggtt && is_primary); - if (IS_DGFX(xe)) { + if (xe_device_has_lmtt(xe)) { bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid); valid_any = valid_any || (valid_lmem && is_primary); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 0fe47f41b63c..13970d5a2867 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -308,7 +308,7 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne if (!xe_gt_is_media_type(gt)) { debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare", 0644, parent, parent, &ggtt_fops); - if (IS_DGFX(gt_to_xe(gt))) + if (xe_device_has_lmtt(gt_to_xe(gt))) debugfs_create_file_unsafe(vfid ? 
"lmem_quota" : "lmem_spare", 0644, parent, parent, &lmem_fops); } @@ -558,7 +558,7 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) drm_debugfs_create_files(pf_ggtt_info, ARRAY_SIZE(pf_ggtt_info), pfdentry, minor); - if (IS_DGFX(gt_to_xe(gt))) + if (xe_device_has_lmtt(gt_to_xe(gt))) drm_debugfs_create_files(pf_lmem_info, ARRAY_SIZE(pf_lmem_info), pfdentry, minor); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index a439261bf4d7..9b2fc9db55b8 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -82,17 +82,17 @@ int xe_gt_sriov_vf_reset(struct xe_gt *gt) } static int guc_action_match_version(struct xe_guc *guc, - u32 wanted_branch, u32 wanted_major, u32 wanted_minor, - u32 *branch, u32 *major, u32 *minor, u32 *patch) + struct xe_uc_fw_version *wanted, + struct xe_uc_fw_version *found) { u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = { FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_MATCH_VERSION), - FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) | - FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) | - FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor), + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) | + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) | + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor), }; u32 response[GUC_MAX_MMIO_MSG_LEN]; int ret; @@ -106,120 +106,138 @@ static int guc_action_match_version(struct xe_guc *guc, if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0]))) return -EPROTO; - *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]); - *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]); - *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]); - *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]); + memset(found, 0, sizeof(struct xe_uc_fw_version)); + found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]); + found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]); + found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]); + found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]); return 0; } -static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) +static int guc_action_match_version_any(struct xe_guc *guc, + struct xe_uc_fw_version *found) +{ + struct xe_uc_fw_version wanted = { + .branch = GUC_VERSION_BRANCH_ANY, + .major = GUC_VERSION_MAJOR_ANY, + .minor = GUC_VERSION_MINOR_ANY, + .patch = 0 + }; + + return guc_action_match_version(guc, &wanted, found); +} + +static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver) { struct xe_device *xe = gt_to_xe(gt); + memset(ver, 0, sizeof(struct xe_uc_fw_version)); + switch (xe->info.platform) { case XE_TIGERLAKE ... 
XE_PVC: /* 1.1 this is current baseline for Xe driver */ - *branch = 0; - *major = 1; - *minor = 1; + ver->branch = 0; + ver->major = 1; + ver->minor = 1; break; default: /* 1.2 has support for the GMD_ID KLV */ - *branch = 0; - *major = 1; - *minor = 2; + ver->branch = 0; + ver->major = 1; + ver->minor = 2; break; } } -static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor) +static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver) { /* for now it's the same as minimum */ - return vf_minimum_guc_version(gt, branch, major, minor); + return vf_minimum_guc_version(gt, ver); } static int vf_handshake_with_guc(struct xe_gt *gt) { - struct xe_gt_sriov_vf_guc_version *guc_version = >->sriov.vf.guc_version; + struct xe_uc_fw_version *guc_version = >->sriov.vf.guc_version; + struct xe_uc_fw_version wanted = {0}; struct xe_guc *guc = >->uc.guc; - u32 wanted_branch, wanted_major, wanted_minor; - u32 branch, major, minor, patch; + bool old = false; int err; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); /* select wanted version - prefer previous (if any) */ if (guc_version->major || guc_version->minor) { - wanted_branch = guc_version->branch; - wanted_major = guc_version->major; - wanted_minor = guc_version->minor; + wanted = *guc_version; + old = true; } else { - vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor); - xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY); + vf_wanted_guc_version(gt, &wanted); + xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY); + + /* First time we handshake, so record the minimum wanted */ + gt->sriov.vf.wanted_guc_version = wanted; } - err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor, - &branch, &major, &minor, &patch); + err = guc_action_match_version(guc, &wanted, guc_version); if (unlikely(err)) goto fail; - /* we don't support interface version change */ - if ((guc_version->major || guc_version->minor) && - (guc_version->branch != branch || guc_version->major != major || - guc_version->minor != minor)) { - xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n", - branch, major, minor, patch); - xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n", - guc_version->branch, guc_version->major, - guc_version->minor, guc_version->patch); - err = -EREMCHG; - goto fail; + if (old) { + /* we don't support interface version change */ + if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) { + xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n", + guc_version->branch, guc_version->major, + guc_version->minor, guc_version->patch); + xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n", + wanted.branch, wanted.major, + wanted.minor, wanted.patch); + err = -EREMCHG; + goto fail; + } else { + /* version is unchanged, no need to re-verify it */ + return 0; + } } /* illegal */ - if (major > wanted_major) { + if (guc_version->major > wanted.major) { err = -EPROTO; goto unsupported; } /* there's no fallback on major version. 
*/ - if (major != wanted_major) { + if (guc_version->major != wanted.major) { err = -ENOPKG; goto unsupported; } /* check against minimum version supported by us */ - vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor); - xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY); - if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) { + vf_minimum_guc_version(gt, &wanted); + xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY); + if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) { err = -ENOKEY; goto unsupported; } xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n", - branch, major, minor, patch); + guc_version->branch, guc_version->major, + guc_version->minor, guc_version->patch); - guc_version->branch = branch; - guc_version->major = major; - guc_version->minor = minor; - guc_version->patch = patch; return 0; unsupported: xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n", - branch, major, minor, patch, ERR_PTR(err)); + guc_version->branch, guc_version->major, + guc_version->minor, guc_version->patch, + ERR_PTR(err)); fail: xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n", - wanted_major, wanted_minor, ERR_PTR(err)); + wanted.major, wanted.minor, ERR_PTR(err)); /* try again with *any* just to query which version is supported */ - if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY, - GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY, - &branch, &major, &minor, &patch)) + if (!guc_action_match_version_any(guc, &wanted)) xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n", - branch, major, minor, patch); + wanted.branch, wanted.major, wanted.minor, wanted.patch); return err; } @@ -250,6 +268,29 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt) return 0; } +/** + * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions + * @gt: the &xe_gt + * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version + * @found: pointer to the xe_uc_fw_version to be filled with the found version + * + * This function is for VF use only and it can only be used after successful + * version handshake with the GuC. + */ +void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt, + struct xe_uc_fw_version *wanted, + struct xe_uc_fw_version *found) +{ + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, gt->sriov.vf.guc_version.major); + + if (wanted) + *wanted = gt->sriov.vf.wanted_guc_version; + + if (found) + *found = gt->sriov.vf.guc_version; +} + static int guc_action_vf_notify_resfix_done(struct xe_guc *guc) { u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { @@ -415,6 +456,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt) xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n", start, start + size - 1, size / SZ_1K); + config->ggtt_shift = start - (s64)config->ggtt_base; config->ggtt_base = start; config->ggtt_size = size; @@ -560,106 +602,56 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt) return gt->sriov.vf.self_config.lmem_size; } -static struct xe_ggtt_node * -vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end) -{ - struct xe_ggtt_node *node; - int err; - - node = xe_ggtt_node_init(ggtt); - if (IS_ERR(node)) - return node; - - err = xe_ggtt_node_insert_balloon(node, start, end); - if (err) { - xe_ggtt_node_fini(node); - return ERR_PTR(err); - } - - return node; -} - -static int vf_balloon_ggtt(struct xe_gt *gt) +/** + * xe_gt_sriov_vf_ggtt - VF GGTT configuration. + * @gt: the &xe_gt + * + * This function is for VF use only. 
+ * + * Return: size of the GGTT assigned to VF. + */ +u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt) { - struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; - struct xe_tile *tile = gt_to_tile(gt); - struct xe_ggtt *ggtt = tile->mem.ggtt; - struct xe_device *xe = gt_to_xe(gt); - u64 start, end; - - xe_gt_assert(gt, IS_SRIOV_VF(xe)); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); - - if (!config->ggtt_size) - return -ENODATA; - - /* - * VF can only use part of the GGTT as allocated by the PF: - * - * WOPCM GUC_GGTT_TOP - * |<------------ Total GGTT size ------------------>| - * - * VF GGTT base -->|<- size ->| - * - * +--------------------+----------+-----------------+ - * |////////////////////| block |\\\\\\\\\\\\\\\\\| - * +--------------------+----------+-----------------+ - * - * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->| - */ - - start = xe_wopcm_size(xe); - end = config->ggtt_base; - if (end != start) { - tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end); - if (IS_ERR(tile->sriov.vf.ggtt_balloon[0])) - return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]); - } - - start = config->ggtt_base + config->ggtt_size; - end = GUC_GGTT_TOP; - if (end != start) { - tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end); - if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) { - xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); - return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]); - } - } + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, gt->sriov.vf.guc_version.major); + xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); - return 0; + return gt->sriov.vf.self_config.ggtt_size; } -static void deballoon_ggtt(struct drm_device *drm, void *arg) +/** + * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset. + * @gt: the &xe_gt + * + * This function is for VF use only. + * + * Return: base offset of the GGTT assigned to VF. + */ +u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt) { - struct xe_tile *tile = arg; + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, gt->sriov.vf.guc_version.major); + xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size); - xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); - xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]); - xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); + return gt->sriov.vf.self_config.ggtt_base; } /** - * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration. - * @gt: the &xe_gt + * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration + * @gt: the &xe_gt struct instance * * This function is for VF use only. * - * Return: 0 on success or a negative error code on failure. 
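+ * For example (illustrative numbers only), a VF whose GGTT block sat at
+ * 0x00800000 before migration and was reassigned to 0x00A00000 on the new
+ * host reports a shift of +0x00200000; a move in the opposite direction
+ * yields a negative shift.
+ *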
+ * Return: The shift value; could be negative */ -int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt) +s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt) { - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); - int err; - - if (xe_gt_is_media_type(gt)) - return 0; + struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; - err = vf_balloon_ggtt(gt); - if (err) - return err; + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, !xe_gt_is_media_type(gt)); - return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile); + return config->ggtt_shift; } static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor) @@ -1043,6 +1035,8 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p) string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf)); drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf); + drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift); + if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf); @@ -1079,19 +1073,20 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p) */ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p) { - struct xe_gt_sriov_vf_guc_version *guc_version = >->sriov.vf.guc_version; + struct xe_uc_fw_version *guc_version = >->sriov.vf.guc_version; + struct xe_uc_fw_version *wanted = >->sriov.vf.wanted_guc_version; struct xe_gt_sriov_vf_relay_version *pf_version = >->sriov.vf.pf_version; - u32 branch, major, minor; + struct xe_uc_fw_version ver; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); drm_printf(p, "GuC ABI:\n"); - vf_minimum_guc_version(gt, &branch, &major, &minor); - drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor); + vf_minimum_guc_version(gt, &ver); + drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor); - vf_wanted_guc_version(gt, &branch, &major, &minor); - drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor); + drm_printf(p, "\twanted:\t%u.%u.%u.*\n", + wanted->branch, wanted->major, wanted->minor); drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n", guc_version->branch, guc_version->major, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index ba6c5d74e326..e0357f341a2d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -11,19 +11,26 @@ struct drm_printer; struct xe_gt; struct xe_reg; +struct xe_uc_fw_version; int xe_gt_sriov_vf_reset(struct xe_gt *gt); int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt); +void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt, + struct xe_uc_fw_version *wanted, + struct xe_uc_fw_version *found); int xe_gt_sriov_vf_query_config(struct xe_gt *gt); int xe_gt_sriov_vf_connect(struct xe_gt *gt); int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt); -int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt); int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt); void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt); u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt); u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt); u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt); +u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt); +u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt); +s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt); + u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg); void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, 
u32 val); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h index a57f13b5afcd..ef041679e9d4 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h @@ -7,20 +7,7 @@ #define _XE_GT_SRIOV_VF_TYPES_H_ #include <linux/types.h> - -/** - * struct xe_gt_sriov_vf_guc_version - GuC ABI version details. - */ -struct xe_gt_sriov_vf_guc_version { - /** @branch: branch version. */ - u8 branch; - /** @major: major version. */ - u8 major; - /** @minor: minor version. */ - u8 minor; - /** @patch: patch version. */ - u8 patch; -}; +#include "xe_uc_fw_types.h" /** * struct xe_gt_sriov_vf_relay_version - PF ABI version details. @@ -40,6 +27,8 @@ struct xe_gt_sriov_vf_selfconfig { u64 ggtt_base; /** @ggtt_size: assigned size of the GGTT region. */ u64 ggtt_size; + /** @ggtt_shift: difference in ggtt_base on last migration */ + s64 ggtt_shift; /** @lmem_size: assigned size of the LMEM. */ u64 lmem_size; /** @num_ctxs: assigned number of GuC submission context IDs. */ @@ -71,8 +60,10 @@ struct xe_gt_sriov_vf_runtime { * struct xe_gt_sriov_vf - GT level VF virtualization data. */ struct xe_gt_sriov_vf { + /** @wanted_guc_version: minimum wanted GuC ABI version. */ + struct xe_uc_fw_version wanted_guc_version; /** @guc_version: negotiated GuC ABI version. */ - struct xe_gt_sriov_vf_guc_version guc_version; + struct xe_uc_fw_version guc_version; /** @self_config: resource configurations. */ struct xe_gt_sriov_vf_selfconfig self_config; /** @pf_version: negotiated VF/PF ABI version. */ diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 084cbdeba8ea..6088df8e159c 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -138,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt) int pending_seqno; /* + * we can get here before the CTs are even initialized if we're wedging + * very early, in which case there are not going to be any pending + * fences so we can bail immediately. + */ + if (!xe_guc_ct_initialized(>->uc.guc.ct)) + return; + + /* * CT channel is already disabled at this point. No new TLB requests can * appear. */ @@ -441,30 +449,6 @@ void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm) } /** - * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA - * @gt: GT structure - * @fence: invalidation fence which will be signal on TLB invalidation - * completion, can be NULL - * @vma: VMA to invalidate - * - * Issue a range based TLB invalidation if supported, if not fallback to a full - * TLB invalidation. Completion of TLB is asynchronous and caller can use - * the invalidation fence to wait for completion. 
- * - * Return: Negative error code on error, 0 on success - */ -int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, - struct xe_gt_tlb_invalidation_fence *fence, - struct xe_vma *vma) -{ - xe_gt_assert(gt, vma); - - return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma), - xe_vma_end(vma), - xe_vma_vm(vma)->usm.asid); -} - -/** * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler * @guc: guc * @msg: message indicating TLB invalidation done diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index abe9b03d543e..31072dbcad8e 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -19,9 +19,6 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt); void xe_gt_tlb_invalidation_reset(struct xe_gt *gt); int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt); -int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, - struct xe_gt_tlb_invalidation_fence *fence, - struct xe_vma *vma); void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm); int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 516c81e3b8dd..305939c69747 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -12,23 +12,20 @@ #include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_mmio.h" #include "xe_wa.h" -static void -load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) +static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, + const struct xe_reg regs[]) { - va_list argp; u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {}; int i; - if (drm_WARN_ON(>_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS)) - numregs = XE_MAX_DSS_FUSE_REGS; + xe_gt_assert(gt, numregs <= ARRAY_SIZE(fuse_val)); - va_start(argp, numregs); for (i = 0; i < numregs; i++) - fuse_val[i] = xe_mmio_read32(>->mmio, va_arg(argp, struct xe_reg)); - va_end(argp); + fuse_val[i] = xe_mmio_read32(>->mmio, regs[i]); bitmap_from_arr32(mask, fuse_val, numregs * 32); } @@ -218,9 +215,19 @@ get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs) void xe_gt_topology_init(struct xe_gt *gt) { + static const struct xe_reg geometry_regs[] = { + XELP_GT_GEOMETRY_DSS_ENABLE, + XE2_GT_GEOMETRY_DSS_1, + XE2_GT_GEOMETRY_DSS_2, + }; + static const struct xe_reg compute_regs[] = { + XEHP_GT_COMPUTE_DSS_ENABLE, + XEHPC_GT_COMPUTE_DSS_ENABLE_EXT, + XE2_GT_COMPUTE_DSS_2, + }; + int num_geometry_regs, num_compute_regs; struct xe_device *xe = gt_to_xe(gt); struct drm_printer p; - int num_geometry_regs, num_compute_regs; get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs); @@ -228,23 +235,18 @@ xe_gt_topology_init(struct xe_gt *gt) * Register counts returned shouldn't exceed the number of registers * passed as parameters below. 
*/ - drm_WARN_ON(&xe->drm, num_geometry_regs > 3); - drm_WARN_ON(&xe->drm, num_compute_regs > 3); + xe_gt_assert(gt, num_geometry_regs <= ARRAY_SIZE(geometry_regs)); + xe_gt_assert(gt, num_compute_regs <= ARRAY_SIZE(compute_regs)); load_dss_mask(gt, gt->fuse_topo.g_dss_mask, - num_geometry_regs, - XELP_GT_GEOMETRY_DSS_ENABLE, - XE2_GT_GEOMETRY_DSS_1, - XE2_GT_GEOMETRY_DSS_2); - load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs, - XEHP_GT_COMPUTE_DSS_ENABLE, - XEHPC_GT_COMPUTE_DSS_ENABLE_EXT, - XE2_GT_COMPUTE_DSS_2); + num_geometry_regs, geometry_regs); + load_dss_mask(gt, gt->fuse_topo.c_dss_mask, + num_compute_regs, compute_regs); + load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, >->fuse_topo.eu_type); load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask); - p = drm_dbg_printer(>_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology"); - + p = xe_gt_dbg_printer(gt); xe_gt_topology_dump(gt, &p); } diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index bac5471a1a78..209e5d53c290 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -710,6 +710,10 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc) if (err) return err; + err = xe_guc_buf_cache_init(&guc->buf); + if (err) + return err; + /* XXX xe_guc_db_mgr_init not needed for now */ return 0; @@ -1098,14 +1102,6 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc) struct xe_gt *gt = guc_to_gt(guc); int ret; - ret = xe_gt_sriov_vf_bootstrap(gt); - if (ret) - return ret; - - ret = xe_gt_sriov_vf_query_config(gt); - if (ret) - return ret; - ret = xe_guc_hwconfig_init(guc); if (ret) return ret; @@ -1285,6 +1281,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, struct xe_reg reply_reg = xe_gt_is_media_type(gt) ? MED_VF_SW_FLAG(0) : VF_SW_FLAG(0); const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1; + bool lost = false; int ret; int i; @@ -1318,6 +1315,12 @@ retry: FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC), 50000, &reply, false); if (ret) { + /* scratch registers might be cleared during FLR, try once more */ + if (!reply && !lost) { + xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]); + lost = true; + goto retry; + } timeout: xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n", request[0], reply); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 44c1fa2fe7c8..07a027755627 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -20,6 +20,7 @@ #include "xe_gt_ccs_mode.h" #include "xe_gt_printk.h" #include "xe_guc.h" +#include "xe_guc_buf.h" #include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_hw_engine.h" @@ -994,6 +995,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); } +static int guc_ads_update_policies(struct xe_guc_ads *ads, const struct guc_policies *policies) +{ + CLASS(xe_guc_buf_from_data, buf)(&ads_to_guc(ads)->buf, policies, sizeof(*policies)); + + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf)); +} + /** * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy * @ads: Additional data structures object @@ -1005,11 +1016,8 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads) { struct xe_device *xe = ads_to_xe(ads); - struct xe_gt *gt = ads_to_gt(ads); - struct xe_tile *tile = 
gt_to_tile(gt); struct guc_policies *policies; - struct xe_bo *bo; - int ret = 0; + int ret; policies = kmalloc(sizeof(*policies), GFP_KERNEL); if (!policies) @@ -1023,16 +1031,7 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads) else policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET; - bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies), - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT); - if (IS_ERR(bo)) { - ret = PTR_ERR(bo); - goto out; - } - - ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo)); -out: + ret = guc_ads_update_policies(ads, policies); kfree(policies); return ret; } diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c index 0193c94dd6a0..14a07dca48e7 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.c +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -37,10 +37,6 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) struct xe_gt *gt = cache_to_gt(cache); struct xe_sa_manager *sam; - /* XXX: currently it's useful only for the PF actions */ - if (!IS_SRIOV_PF(gt_to_xe(gt))) - return 0; - sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32)); if (IS_ERR(sam)) return PTR_ERR(sam); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 2447de0ebedf..37509f619503 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -25,6 +25,7 @@ #include "xe_gt_printk.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_monitor.h" +#include "xe_gt_sriov_printk.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" #include "xe_guc_log.h" @@ -84,6 +85,8 @@ struct g2h_fence { bool done; }; +#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) + static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) { g2h_fence->response_buffer = response_buffer; @@ -514,6 +517,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct) */ void xe_guc_ct_stop(struct xe_guc_ct *ct) { + if (!xe_guc_ct_initialized(ct)) + return; + xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); stop_g2h_handler(ct); } @@ -625,6 +631,47 @@ static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) spin_unlock_irq(&ct->fast_lock); } +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action) +{ + unsigned int slot = fence % ARRAY_SIZE(ct->fast_req); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) + unsigned long entries[SZ_32]; + unsigned int n; + + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + + /* May be called under spinlock, so avoid sleeping */ + ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT); +#endif + ct->fast_req[slot].fence = fence; + ct->fast_req[slot].action = action; +} +#else +static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action) +{ +} +#endif + +/* + * The CT protocol accepts a 16 bits fence. This field is fully owned by the + * driver, the GuC will just copy it to the reply message. Since we need to + * be able to distinguish between replies to REQUEST and FAST_REQUEST messages, + * we use one bit of the seqno as an indicator for that and a rolling counter + * for the remaining 15 bits. 
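+ * As a worked example, with fence_seqno at 5 a tracked REQUEST is sent with
+ * fence 0x0005, while a FAST_REQUEST issued at the same point would carry
+ * fence 0x8005, i.e. the same 15-bit counter value with CT_SEQNO_UNTRACKED
+ * set on top.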
+ */ +#define CT_SEQNO_MASK GENMASK(14, 0) +#define CT_SEQNO_UNTRACKED BIT(15) +static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) +{ + u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK; + + if (!is_g2h_fence) + seqno |= CT_SEQNO_UNTRACKED; + + return seqno; +} + #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, @@ -701,6 +748,9 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | GUC_HXG_EVENT_MSG_0_DATA0, action[0]); } else { + fast_req_track(ct, ct_fence_value, + FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0])); + cmd[1] = FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) | FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | @@ -733,25 +783,6 @@ corrupted: return -EPIPE; } -/* - * The CT protocol accepts a 16 bits fence. This field is fully owned by the - * driver, the GuC will just copy it to the reply message. Since we need to - * be able to distinguish between replies to REQUEST and FAST_REQUEST messages, - * we use one bit of the seqno as an indicator for that and a rolling counter - * for the remaining 15 bits. - */ -#define CT_SEQNO_MASK GENMASK(14, 0) -#define CT_SEQNO_UNTRACKED BIT(15) -static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) -{ - u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK; - - if (!is_g2h_fence) - seqno |= CT_SEQNO_UNTRACKED; - - return seqno; -} - static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence) @@ -760,7 +791,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u16 seqno; int ret; - xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); + xe_gt_assert(gt, xe_guc_ct_initialized(ct)); xe_gt_assert(gt, !g2h_len || !g2h_fence); xe_gt_assert(gt, !num_g2h || !g2h_fence); xe_gt_assert(gt, !g2h_len || num_g2h); @@ -1143,6 +1174,55 @@ static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action) return 0; } +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +static void fast_req_report(struct xe_guc_ct *ct, u16 fence) +{ + u16 fence_min = U16_MAX, fence_max = 0; + struct xe_gt *gt = ct_to_gt(ct); + bool found = false; + unsigned int n; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) + char *buf; +#endif + + lockdep_assert_held(&ct->lock); + + for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) { + if (ct->fast_req[n].fence < fence_min) + fence_min = ct->fast_req[n].fence; + if (ct->fast_req[n].fence > fence_max) + fence_max = ct->fast_req[n].fence; + + if (ct->fast_req[n].fence != fence) + continue; + found = true; + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) + buf = kmalloc(SZ_4K, GFP_NOWAIT); + if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0)) + xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s", + fence, ct->fast_req[n].action, buf); + else + xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n", + fence, ct->fast_req[n].action); + kfree(buf); +#else + xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n", + fence, ct->fast_req[n].action); +#endif + break; + } + + if (!found) + xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? 
[range = 0x%x -> 0x%x, next = 0x%X]\n", + fence, fence_min, fence_max, ct->fence_seqno); +} +#else +static void fast_req_report(struct xe_guc_ct *ct, u16 fence) +{ +} +#endif + static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) { struct xe_gt *gt = ct_to_gt(ct); @@ -1171,6 +1251,9 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) else xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", type, fence); + + fast_req_report(ct, fence); + CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); return -EPROTO; @@ -1344,7 +1427,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) u32 action; u32 *hxg; - xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); + xe_gt_assert(gt, xe_guc_ct_initialized(ct)); lockdep_assert_held(&ct->fast_lock); if (ct->state == XE_GUC_CT_STATE_DISABLED) @@ -1624,6 +1707,186 @@ static void g2h_worker_func(struct work_struct *w) receive_g2h(ct); } +static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds, + u32 size, u32 idx, s64 shift) +{ + u32 hi, lo; + u64 offset; + + lo = xe_map_rd_ring_u32(xe, cmds, idx, size); + hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size); + offset = make_u64(hi, lo); + offset += shift; + lo = lower_32_bits(offset); + hi = upper_32_bits(offset); + xe_map_wr_ring_u32(xe, cmds, idx, size, lo); + xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi); +} + +/* + * Shift any GGTT addresses within a single message left within CTB from + * before post-migration recovery. + * @ct: pointer to CT struct of the target GuC + * @cmds: iomap buffer containing CT messages + * @head: start of the target message within the buffer + * @len: length of the target message + * @size: size of the commands buffer + * @shift: the address shift to be added to each GGTT reference + * Return: true if the message was fixed or needed no fixups, false on failure + */ +static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct, + struct iosys_map *cmds, u32 head, + u32 len, u32 size, s64 shift) +{ + struct xe_gt *gt = ct_to_gt(ct); + struct xe_device *xe = ct_to_xe(ct); + u32 msg[GUC_HXG_MSG_MIN_LEN]; + u32 action, i, n; + + xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN); + + msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size); + action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); + + xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action); + + switch (action) { + case XE_GUC_ACTION_REGISTER_CONTEXT: + if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN) + goto err_len; + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER, + shift); + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER, + shift); + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift); + break; + case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC: + if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN) + goto err_len; + n = xe_map_rd_ring_u32(xe, cmds, head + + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size); + if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n) + goto err_len; + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER, + shift); + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER, + shift); + for (i = 0; i < n; i++) + xe_fixup_u64_in_cmds(xe, cmds, size, head + + XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR + + 2 * i, shift); + break; + default: + 
break; + } + return true; + +err_len: + xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len); + return false; +} + +/* + * Apply fixups to the next outgoing CT message within given CTB + * @ct: the &xe_guc_ct struct instance representing the target GuC + * @h2g: the &guc_ctb struct instance of the target buffer + * @shift: shift to be added to all GGTT addresses within the CTB + * @mhead: pointer to an integer storing message start position; the + * position is changed to next message before this function return + * @avail: size of the area available for parsing, that is length + * of all remaining messages stored within the CTB + * Return: size of the area available for parsing after one message + * has been parsed, that is length remaining from the updated mhead + */ +static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g, + s64 shift, u32 *mhead, s32 avail) +{ + struct xe_gt *gt = ct_to_gt(ct); + struct xe_device *xe = ct_to_xe(ct); + u32 msg[GUC_HXG_MSG_MIN_LEN]; + u32 size = h2g->info.size; + u32 head = *mhead; + u32 len; + + xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN); + + /* Read header */ + msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size); + len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN; + + if (unlikely(len > (u32)avail)) { + xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n", + avail, len); + return 0; + } + + head = (head + GUC_CTB_MSG_MIN_LEN) % size; + if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift)) + return 0; + *mhead = (head + msg_len_to_hxg_len(len)) % size; + + return avail - len; +} + +/** + * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages + * @ct: pointer to CT struct of the target GuC + * @ggtt_shift: shift to be added to all GGTT addresses within the CTB + * + * Messages in GuC to Host CTB are owned by GuC and any fixups in them + * are made by GuC. But content of the Host to GuC CTB is owned by the + * KMD, so fixups to GGTT references in any pending messages need to be + * applied here. + * This function updates GGTT offsets in payloads of pending H2G CTB + * messages (messages which were not consumed by GuC before the VF got + * paused). 
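+ *
+ * A rough sketch of the expected call pattern during VF post-migration
+ * recovery (assumed, not prescriptive; ordering and locking are owned by the
+ * recovery code, and xe_ggtt_shift_nodes_locked() must run under ggtt->lock):
+ *
+ *	s64 shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ *
+ *	xe_ggtt_shift_nodes_locked(ggtt, shift);
+ *	xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);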
+ */ +void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift) +{ + struct guc_ctb *h2g = &ct->ctbs.h2g; + struct xe_guc *guc = ct_to_guc(ct); + struct xe_gt *gt = guc_to_gt(guc); + u32 head, tail, size; + s32 avail; + + if (unlikely(h2g->info.broken)) + return; + + h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); + head = h2g->info.head; + tail = READ_ONCE(h2g->info.tail); + size = h2g->info.size; + + if (unlikely(head > size)) + goto corrupted; + + if (unlikely(tail >= size)) + goto corrupted; + + avail = tail - head; + + /* beware of buffer wrap case */ + if (unlikely(avail < 0)) + avail += size; + xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size); + xe_gt_assert(gt, avail >= 0); + + while (avail > 0) + avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail); + + return; + +corrupted: + xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n", + head, tail, size); + h2g->info.broken = true; +} + static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic, bool want_ctb) { @@ -1770,6 +2033,24 @@ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb) } #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +/* + * This is a helper function which assists the driver in identifying if a fault + * injection test is currently active, allowing it to reduce unnecessary debug + * output. Typically, the function returns zero, but the fault injection + * framework can alter this to return an error. Since faults are injected + * through this function, it's important to ensure the compiler doesn't optimize + * it into an inline function. To avoid such optimization, the 'noinline' + * attribute is applied. Compiler optimizes the static function defined in the + * header file as an inline function. + */ +noinline int xe_is_injection_active(void) { return 0; } +ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO); +#else +int xe_is_injection_active(void) { return 0; } +#endif + static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code) { struct xe_guc_log_snapshot *snapshot_log; @@ -1780,6 +2061,12 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso if (ctb) ctb->info.broken = true; + /* + * Huge dump is getting generated when injecting error for guc CT/MMIO + * functions. So, let us suppress the dump when fault is injected. 
+ */ + if (xe_is_injection_active()) + return; /* Ignore further errors after the first dump until a reset */ if (ct->dead.reported) @@ -1830,7 +2117,6 @@ static void ct_dead_print(struct xe_dead_ct *dead) return; } - /* Can't generate a genuine core dump at this point, so just do the good bits */ drm_puts(&lp, "**** Xe Device Coredump ****\n"); drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 82c4ae458dda..99c5dec446f2 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -22,6 +22,13 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb); +void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift); + +static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct) +{ + return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED; +} + static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct) { return ct->state == XE_GUC_CT_STATE_ENABLED; diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index 8e1b9d981d61..8b03b50313d9 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/iosys-map.h> #include <linux/spinlock_types.h> +#include <linux/stackdepot.h> #include <linux/wait.h> #include <linux/xarray.h> @@ -104,6 +105,18 @@ struct xe_dead_ct { /** snapshot_log: copy of GuC log at point of error */ struct xe_guc_log_snapshot *snapshot_log; }; + +/** struct xe_fast_req_fence - Used to track FAST_REQ messages by fence to match error responses */ +struct xe_fast_req_fence { + /** @fence: sequence number sent in H2G and return in G2H error */ + u16 fence; + /** @action: H2G action code */ + u16 action; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) + /** @stack: call stack from when the H2G was sent */ + depot_stack_handle_t stack; +#endif +}; #endif /** @@ -152,6 +165,8 @@ struct xe_guc_ct { #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) /** @dead: information for debugging dead CTs */ struct xe_dead_ct dead; + /** @fast_req: history of FAST_REQ messages for matching with G2H error responses */ + struct xe_fast_req_fence fast_req[SZ_32]; #endif }; diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c index 0fb48f8f05d8..92e1f9f41b8c 100644 --- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c +++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c @@ -124,7 +124,7 @@ static void free_engine_activity_buffers(struct engine_activity_buffer *buffer) static bool is_engine_activity_supported(struct xe_guc *guc) { struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY]; - struct xe_uc_fw_version required = { 1, 14, 1 }; + struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 }; struct xe_gt *gt = guc_to_gt(guc); if (IS_SRIOV_VF(gt_to_xe(gt))) { diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h index 5b896f5fafaf..f1e2b0be90a9 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.h +++ b/drivers/gpu/drm/xe/xe_guc_log.h @@ -12,7 +12,7 @@ struct drm_printer; struct xe_device; -#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER) +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC) #define CRASH_BUFFER_SIZE SZ_1M #define DEBUG_BUFFER_SIZE SZ_8M #define CAPTURE_BUFFER_SIZE SZ_2M diff --git 
a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 18c623992035..9fab5f5b10fa 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -51,6 +51,7 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 +#define BMG_MIN_FREQ 1200 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ @@ -153,7 +154,7 @@ static int pc_action_reset(struct xe_guc_pc *pc) int ret; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); - if (ret) + if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n", ERR_PTR(ret)); @@ -177,7 +178,7 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc) /* Blocking here to ensure the results are ready before reading them */ ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action)); - if (ret) + if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n", ERR_PTR(ret)); @@ -200,7 +201,7 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); - if (ret) + if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n", id, value, ERR_PTR(ret)); @@ -222,7 +223,7 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); - if (ret) + if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe", ERR_PTR(ret)); @@ -239,7 +240,7 @@ static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode) int ret; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); - if (ret) + if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n", mode, ERR_PTR(ret)); return ret; @@ -817,6 +818,7 @@ void xe_guc_pc_init_early(struct xe_guc_pc *pc) static int pc_adjust_freq_bounds(struct xe_guc_pc *pc) { + struct xe_tile *tile = gt_to_tile(pc_to_gt(pc)); int ret; lockdep_assert_held(&pc->freq_lock); @@ -843,6 +845,9 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc) if (pc_get_min_freq(pc) > pc->rp0_freq) ret = pc_set_min_freq(pc, pc->rp0_freq); + if (XE_WA(tile->primary_gt, 14022085890)) + ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc))); + out: return ret; } @@ -1068,7 +1073,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) goto out; } - memset(pc->bo->vmap.vaddr, 0, size); + xe_map_memset(xe, &pc->bo->vmap, 0, 0, size); slpc_shared_data_write(pc, header.size, size); earlier = ktime_get(); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 6d84a52b660a..7170e78e5b8e 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -498,6 +498,15 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc, action[len++] = upper_32_bits(xe_lrc_descriptor(lrc)); } + /* explicitly checks some fields that we might fixup later */ + xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo == + action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]); + xe_gt_assert(guc_to_gt(guc), info->wq_base_lo == + action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]); + xe_gt_assert(guc_to_gt(guc), q->width == + 
action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]); + xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo == + action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]); xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE); #undef MAX_MLRC_REG_SIZE @@ -522,6 +531,14 @@ static void __register_exec_queue(struct xe_guc *guc, info->hwlrca_hi, }; + /* explicitly checks some fields that we might fixup later */ + xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo == + action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]); + xe_gt_assert(guc_to_gt(guc), info->wq_base_lo == + action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]); + xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo == + action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]); + xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); } @@ -970,10 +987,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) */ xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC); - if (ctx_timestamp < ctx_job_timestamp) - diff = ctx_timestamp + U32_MAX - ctx_job_timestamp; - else - diff = ctx_timestamp - ctx_job_timestamp; + diff = ctx_timestamp - ctx_job_timestamp; /* * Ensure timeout is within 5% to account for an GuC scheduling latency @@ -1762,6 +1776,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc) { int ret; + if (!guc->submission_state.initialized) + return 0; + /* * Using an atomic here rather than submission_state.lock as this * function can be called while holding the CT lock (engine reset diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 93241fd0a4ba..3439c8522d01 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -17,6 +17,7 @@ #include "regs/xe_irq_regs.h" #include "xe_assert.h" #include "xe_bo.h" +#include "xe_configfs.h" #include "xe_device.h" #include "xe_execlist.h" #include "xe_force_wake.h" @@ -693,7 +694,7 @@ static void read_media_fuses(struct xe_gt *gt) if (!(BIT(j) & vdbox_mask)) { gt->info.engine_mask &= ~BIT(i); - drm_info(&xe->drm, "vcs%u fused off\n", j); + xe_gt_info(gt, "vcs%u fused off\n", j); } } @@ -703,7 +704,7 @@ static void read_media_fuses(struct xe_gt *gt) if (!(BIT(j) & vebox_mask)) { gt->info.engine_mask &= ~BIT(i); - drm_info(&xe->drm, "vecs%u fused off\n", j); + xe_gt_info(gt, "vecs%u fused off\n", j); } } } @@ -728,15 +729,13 @@ static void read_copy_fuses(struct xe_gt *gt) if (!(BIT(j / 2) & bcs_mask)) { gt->info.engine_mask &= ~BIT(i); - drm_info(&xe->drm, "bcs%u fused off\n", j); + xe_gt_info(gt, "bcs%u fused off\n", j); } } } static void read_compute_fuses_from_dss(struct xe_gt *gt) { - struct xe_device *xe = gt_to_xe(gt); - /* * CCS fusing based on DSS masks only applies to platforms that can * have more than one CCS. 
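The check_timeout() hunk in xe_guc_submit.c above collapses the explicit wraparound handling into a single unsigned subtraction. For a fixed-width unsigned counter that is the standard idiom: subtraction is defined modulo 2^N, so the elapsed tick count comes out right across a wrap without any branch (note the dropped branch added U32_MAX where the full modulus would be U32_MAX + 1). A minimal standalone sketch of that property, assuming 32-bit counters and using stdint types in place of the kernel's u32:

#include <assert.h>
#include <stdint.h>

/*
 * Minimal sketch: elapsed ticks across a 32-bit counter wrap.
 * Unsigned subtraction is defined modulo 2^32, so a single
 * "now - then" gives the elapsed count whether or not the
 * counter wrapped in between.
 */
static uint32_t elapsed_ticks(uint32_t now, uint32_t then)
{
	return now - then;
}

int main(void)
{
	/* wrapped: started near the top of the range, now just past zero */
	assert(elapsed_ticks(0x00000010u, 0xfffffff0u) == 0x20u);
	/* not wrapped */
	assert(elapsed_ticks(1000u, 400u) == 600u);
	return 0;
}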
@@ -755,14 +754,13 @@ static void read_compute_fuses_from_dss(struct xe_gt *gt) if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) { gt->info.engine_mask &= ~BIT(i); - drm_info(&xe->drm, "ccs%u fused off\n", j); + xe_gt_info(gt, "ccs%u fused off\n", j); } } } static void read_compute_fuses_from_reg(struct xe_gt *gt) { - struct xe_device *xe = gt_to_xe(gt); u32 ccs_mask; ccs_mask = xe_mmio_read32(>->mmio, XEHP_FUSE4); @@ -774,7 +772,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt) if ((ccs_mask & BIT(j)) == 0) { gt->info.engine_mask &= ~BIT(i); - drm_info(&xe->drm, "ccs%u fused off\n", j); + xe_gt_info(gt, "ccs%u fused off\n", j); } } } @@ -789,8 +787,6 @@ static void read_compute_fuses(struct xe_gt *gt) static void check_gsc_availability(struct xe_gt *gt) { - struct xe_device *xe = gt_to_xe(gt); - if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))) return; @@ -806,7 +802,25 @@ static void check_gsc_availability(struct xe_gt *gt) xe_mmio_write32(>->mmio, GUNIT_GSC_INTR_ENABLE, 0); xe_mmio_write32(>->mmio, GUNIT_GSC_INTR_MASK, ~0); - drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n"); + xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n"); + } +} + +static void check_sw_disable(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev)); + enum xe_hw_engine_id id; + + for (id = 0; id < XE_NUM_HW_ENGINES; ++id) { + if (!(gt->info.engine_mask & BIT(id))) + continue; + + if (!(sw_allowed & BIT(id))) { + gt->info.engine_mask &= ~BIT(id); + xe_gt_info(gt, "%s disabled via configfs\n", + engine_infos[id].name); + } } } @@ -818,6 +832,7 @@ int xe_hw_engines_init_early(struct xe_gt *gt) read_copy_fuses(gt); read_compute_fuses(gt); check_gsc_availability(gt); + check_sw_disable(gt); BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN); BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX); diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 74f31639b37f..f08fc4377d25 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -20,6 +20,8 @@ #include "xe_pcode_api.h" #include "xe_sriov.h" #include "xe_pm.h" +#include "xe_vsec.h" +#include "regs/xe_pmt.h" enum xe_hwmon_reg { REG_TEMP, @@ -51,6 +53,14 @@ enum xe_fan_channel { FAN_MAX, }; +/* Attribute index for powerX_xxx_interval sysfs entries */ +enum sensor_attr_power { + SENSOR_INDEX_PSYS_PL1, + SENSOR_INDEX_PKG_PL1, + SENSOR_INDEX_PSYS_PL2, + SENSOR_INDEX_PKG_PL2, +}; + /* * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is * not supported and below are SKU units to be used. @@ -72,8 +82,9 @@ enum xe_fan_channel { * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute. */ #define PL1_HWMON_ATTR hwmon_power_max +#define PL2_HWMON_ATTR hwmon_power_cap -#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid") +#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "PL2") /* * Timeout for power limit write mailbox command. 
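The sensor_attr_power enum added to xe_hwmon.c above packs both the channel (card/psys vs package) and the power limit (PL1 vs PL2) into a single sysfs attribute index; later hunks in this file recover them with (index % 2) and (index > 1). A small illustrative sketch of that decoding, where the toy_* names are stand-ins rather than driver symbols and the PSYS_PL1, PKG_PL1, PSYS_PL2, PKG_PL2 ordering is assumed to hold:

#include <assert.h>

/*
 * Illustrative decoding of the power{1,2}_{max,cap}_interval attribute
 * index: (index % 2) selects the channel and (index > 1) selects the
 * limit, assuming the enum keeps the ordering shown above.
 */
enum toy_channel { TOY_CARD, TOY_PKG };	/* stand-ins for CHANNEL_CARD/CHANNEL_PKG */
enum toy_limit { TOY_PL1, TOY_PL2 };	/* stand-ins for PL1/PL2_HWMON_ATTR */

struct toy_decoded {
	enum toy_channel channel;
	enum toy_limit limit;
};

static struct toy_decoded toy_decode(int index)
{
	struct toy_decoded d = {
		.channel = (index % 2) ? TOY_PKG : TOY_CARD,
		.limit   = (index > 1) ? TOY_PL2 : TOY_PL1,
	};
	return d;
}

int main(void)
{
	assert(toy_decode(0).channel == TOY_CARD && toy_decode(0).limit == TOY_PL1);
	assert(toy_decode(1).channel == TOY_PKG  && toy_decode(1).limit == TOY_PL1);
	assert(toy_decode(2).channel == TOY_CARD && toy_decode(2).limit == TOY_PL2);
	assert(toy_decode(3).channel == TOY_PKG  && toy_decode(3).limit == TOY_PL2);
	return 0;
}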
@@ -124,6 +135,9 @@ struct xe_hwmon { bool boot_power_limit_read; /** @pl1_on_boot: power limit PL1 on boot */ u32 pl1_on_boot[CHANNEL_MAX]; + /** @pl2_on_boot: power limit PL2 on boot */ + u32 pl2_on_boot[CHANNEL_MAX]; + }; static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel, @@ -151,16 +165,18 @@ static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 att /* return the value only if limit is enabled */ if (attr == PL1_HWMON_ATTR) *uval = (val0 & PWR_LIM_EN) ? val0 : 0; + else if (attr == PL2_HWMON_ATTR) + *uval = (val1 & PWR_LIM_EN) ? val1 : 0; else if (attr == hwmon_power_label) - *uval = (val0 & PWR_LIM_EN) ? 1 : 0; + *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0; else *uval = 0; return ret; } -static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel, - u32 uval) +static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel, + u32 clr, u32 set) { struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); u32 val0, val1; @@ -179,7 +195,9 @@ static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 at channel, val0, val1, ret); if (attr == PL1_HWMON_ATTR) - val0 = uval; + val0 = (val0 & ~clr) | set; + else if (attr == PL2_HWMON_ATTR) + val1 = (val1 & ~clr) | set; else return -EIO; @@ -236,12 +254,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg return GT_PERF_STATUS; break; case REG_PKG_ENERGY_STATUS: - if (xe->info.platform == XE_BATTLEMAGE) { - if (channel == CHANNEL_PKG) - return BMG_PACKAGE_ENERGY_STATUS; - else - return BMG_PLATFORM_ENERGY_STATUS; - } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) { + if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) { return PVC_GT0_PLATFORM_ENERGY_STATUS; } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) { return PCU_CR_PACKAGE_ENERGY_STATUS; @@ -273,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg */ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value) { - u64 reg_val, min, max; + u64 reg_val = 0, min, max; struct xe_device *xe = hwmon->xe; struct xe_reg rapl_limit, pkg_power_sku; struct xe_mmio *mmio = xe_root_tile_mmio(xe); @@ -285,16 +298,6 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe } else { rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); - - /* - * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible. - * So not checking it again here. 
- */ - if (!xe_reg_is_valid(pkg_power_sku)) { - drm_warn(&xe->drm, "pkg_power_sku invalid\n"); - *value = 0; - goto unlock; - } reg_val = xe_mmio_read32(mmio, rapl_limit); } @@ -327,7 +330,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe { struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); int ret = 0; - u32 reg_val; + u32 reg_val, max; struct xe_reg rapl_limit; mutex_lock(&hwmon->hwmon_lock); @@ -339,7 +342,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe if (hwmon->xe->info.has_mbx_power_limits) { drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n", PWR_ATTR_TO_STR(attr), channel); - xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, 0); + xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0); xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, ®_val); } else { reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0); @@ -355,25 +358,29 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe /* Computation in 64-bits to avoid overflow. Round to nearest. */ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER); - reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val); /* - * Clamp power limit to card-firmware default as maximum, as an additional protection to + * Clamp power limit to GPU firmware default as maximum, as an additional protection to * pcode clamp. */ if (hwmon->xe->info.has_mbx_power_limits) { - if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) { - reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]); - drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n", + max = (attr == PL1_HWMON_ATTR) ? + hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel]; + max = REG_FIELD_PREP(PWR_LIM_VAL, max); + if (reg_val > max) { + reg_val = max; + drm_dbg(&hwmon->xe->drm, + "Clamping power limit to GPU firmware default 0x%x\n", reg_val); } } + reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val); + if (hwmon->xe->info.has_mbx_power_limits) - ret = xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, reg_val); + ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val); else - reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN | PWR_LIM_VAL, - reg_val); + reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val); unlock: mutex_unlock(&hwmon->hwmon_lock); return ret; @@ -428,16 +435,37 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy) { struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); struct xe_hwmon_energy_info *ei = &hwmon->ei[channel]; - u64 reg_val; + u32 reg_val; + int ret = 0; - reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, - channel)); + /* Energy is supported only for card and pkg */ + if (channel > CHANNEL_PKG) { + *energy = 0; + return; + } - if (reg_val >= ei->reg_val_prev) - ei->accum_energy += reg_val - ei->reg_val_prev; - else - ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val; + if (hwmon->xe->info.platform == XE_BATTLEMAGE) { + u64 pmt_val; + + ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev), + xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID), + &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val)); + if (ret != sizeof(pmt_val)) { + drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret); + *energy = 0; + return; + } + + if (channel == CHANNEL_PKG) + reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val); + else + reg_val = REG_FIELD_GET64(ENERGY_CARD, 
pmt_val); + } else { + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, + channel)); + } + ei->accum_energy += reg_val - ei->reg_val_prev; ei->reg_val_prev = reg_val; *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY, @@ -452,8 +480,9 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u32 x, y, x_w = 2; /* 2 bits */ u64 r, tau4, out; - int channel = to_sensor_dev_attr(attr)->index; - u32 power_attr = PL1_HWMON_ATTR; + int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD; + u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR; + int ret = 0; xe_pm_runtime_get(hwmon->xe); @@ -506,9 +535,9 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a u32 x, y, rxy, x_w = 2; /* 2 bits */ u64 tau4, r, max_win; unsigned long val; + int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD; + u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR; int ret; - int channel = to_sensor_dev_attr(attr)->index; - u32 power_attr = PL1_HWMON_ATTR; ret = kstrtoul(buf, 0, &val); if (ret) @@ -535,10 +564,8 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a tau4 = (u64)((1 << x_w) | x) << y; max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w); - if (val > max_win) { - drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val); + if (val > max_win) return -EINVAL; - } /* val in hw units */ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1; @@ -563,14 +590,11 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a mutex_lock(&hwmon->hwmon_lock); - if (hwmon->xe->info.has_mbx_power_limits) { - ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r); - r = (r & ~PWR_LIM_TIME) | rxy; - xe_hwmon_pcode_write_power_limit(hwmon, power_attr, channel, r); - } else { + if (hwmon->xe->info.has_mbx_power_limits) + xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy); + else r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel), PWR_LIM_TIME, rxy); - } mutex_unlock(&hwmon->hwmon_lock); @@ -582,15 +606,25 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a /* PSYS PL1 */ static SENSOR_DEVICE_ATTR(power1_max_interval, 0664, xe_hwmon_power_max_interval_show, - xe_hwmon_power_max_interval_store, CHANNEL_CARD); - + xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1); +/* PKG PL1 */ static SENSOR_DEVICE_ATTR(power2_max_interval, 0664, xe_hwmon_power_max_interval_show, - xe_hwmon_power_max_interval_store, CHANNEL_PKG); + xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1); +/* PSYS PL2 */ +static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664, + xe_hwmon_power_max_interval_show, + xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2); +/* PKG PL2 */ +static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664, + xe_hwmon_power_max_interval_show, + xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_power1_max_interval.dev_attr.attr, &sensor_dev_attr_power2_max_interval.dev_attr.attr, + &sensor_dev_attr_power1_cap_interval.dev_attr.attr, + &sensor_dev_attr_power2_cap_interval.dev_attr.attr, NULL }; @@ -600,19 +634,22 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj, 
struct device *dev = kobj_to_dev(kobj); struct xe_hwmon *hwmon = dev_get_drvdata(dev); int ret = 0; - int channel = index ? CHANNEL_PKG : CHANNEL_CARD; - u32 power_attr = PL1_HWMON_ATTR; - u32 uval; + int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD; + u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR; + u32 uval = 0; + struct xe_reg rapl_limit; + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); xe_pm_runtime_get(hwmon->xe); if (hwmon->xe->info.has_mbx_power_limits) { xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval); - ret = (uval & PWR_LIM_EN) ? attr->mode : 0; - } else { - ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, - channel)) ? attr->mode : 0; + } else if (power_attr != PL2_HWMON_ATTR) { + rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); + if (xe_reg_is_valid(rapl_limit)) + uval = xe_mmio_read32(mmio, rapl_limit); } + ret = (uval & PWR_LIM_EN) ? attr->mode : 0; xe_pm_runtime_put(hwmon->xe); @@ -632,8 +669,9 @@ static const struct attribute_group *hwmon_groups[] = { static const struct hwmon_channel_info * const hwmon_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL), - HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT, - HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL), + HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT | + HWMON_P_CAP, + HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP), HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL), @@ -754,31 +792,62 @@ xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) static umode_t xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel) { - u32 uval; + u32 uval = 0; + struct xe_reg reg; + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); switch (attr) { case hwmon_power_max: + case hwmon_power_cap: if (hwmon->xe->info.has_mbx_power_limits) { xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval); - return (uval) ? 0664 : 0; - } else { - return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, - channel)) ? 0664 : 0; + } else if (attr != PL2_HWMON_ATTR) { + reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); + if (xe_reg_is_valid(reg)) + uval = xe_mmio_read32(mmio, reg); } + if (uval & PWR_LIM_EN) { + drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n", + PWR_ATTR_TO_STR(attr), channel); + return 0664; + } + drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n", + PWR_ATTR_TO_STR(attr), channel); + return 0; case hwmon_power_rated_max: - if (hwmon->xe->info.has_mbx_power_limits) + if (hwmon->xe->info.has_mbx_power_limits) { return 0; - else - return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, - channel)) ? 0444 : 0; + } else { + reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); + if (xe_reg_is_valid(reg)) + uval = xe_mmio_read32(mmio, reg); + return uval ? 0444 : 0; + } case hwmon_power_crit: - case hwmon_power_label: if (channel == CHANNEL_CARD) { xe_hwmon_pcode_read_i1(hwmon, &uval); - return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ? - 0444 : 0644 : 0; + return (uval & POWER_SETUP_I1_WATTS) ? 
0644 : 0; } break; + case hwmon_power_label: + if (hwmon->xe->info.has_mbx_power_limits) { + xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval); + } else { + reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); + if (xe_reg_is_valid(reg)) + uval = xe_mmio_read32(mmio, reg); + + if (!uval) { + reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); + if (xe_reg_is_valid(reg)) + uval = xe_mmio_read32(mmio, reg); + } + } + if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) { + xe_hwmon_pcode_read_i1(hwmon, &uval); + return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0; + } + return (uval) ? 0444 : 0; default: return 0; } @@ -790,6 +859,7 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) { switch (attr) { case hwmon_power_max: + case hwmon_power_cap: xe_hwmon_power_max_read(hwmon, attr, channel, val); return 0; case hwmon_power_rated_max: @@ -806,6 +876,7 @@ static int xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val) { switch (attr) { + case hwmon_power_cap: case hwmon_power_max: return xe_hwmon_power_max_write(hwmon, attr, channel, val); case hwmon_power_crit: @@ -888,11 +959,18 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) static umode_t xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel) { + long energy = 0; + switch (attr) { case hwmon_energy_input: case hwmon_energy_label: - return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, - channel)) ? 0444 : 0; + if (hwmon->xe->info.platform == XE_BATTLEMAGE) { + xe_hwmon_energy_get(hwmon, channel, &energy); + return energy ? 0444 : 0; + } else { + return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, + channel)) ? 0444 : 0; + } default: return 0; } @@ -1128,22 +1206,32 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) struct xe_reg pkg_power_sku_unit; if (hwmon->xe->info.has_mbx_power_limits) { - /* Check if card firmware support mailbox power limits commands. */ + /* Check if GPU firmware support mailbox power limits commands. */ if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD, &hwmon->pl1_on_boot[CHANNEL_CARD]) | xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG, - &hwmon->pl1_on_boot[CHANNEL_PKG])) { + &hwmon->pl1_on_boot[CHANNEL_PKG]) | + xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD, + &hwmon->pl2_on_boot[CHANNEL_CARD]) | + xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG, + &hwmon->pl2_on_boot[CHANNEL_PKG])) { drm_warn(&hwmon->xe->drm, - "Failed to read power limits, check card firmware !\n"); + "Failed to read power limits, check GPU firmware !\n"); } else { drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n"); /* Write default limits to read from pcode from now on. 
*/ - xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR, - CHANNEL_CARD, - hwmon->pl1_on_boot[CHANNEL_CARD]); - xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR, - CHANNEL_PKG, - hwmon->pl1_on_boot[CHANNEL_PKG]); + xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR, + CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME, + hwmon->pl1_on_boot[CHANNEL_CARD]); + xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR, + CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME, + hwmon->pl1_on_boot[CHANNEL_PKG]); + xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR, + CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME, + hwmon->pl2_on_boot[CHANNEL_CARD]); + xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR, + CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME, + hwmon->pl2_on_boot[CHANNEL_PKG]); hwmon->scl_shift_power = PWR_UNIT; hwmon->scl_shift_energy = ENERGY_UNIT; hwmon->scl_shift_time = TIME_UNIT; @@ -1227,4 +1315,4 @@ int xe_hwmon_register(struct xe_device *xe) return 0; } - +MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY"); diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index bf7c3981897d..37598588a54f 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -40,6 +40,7 @@ #define LRC_PPHWSP_SIZE SZ_4K #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K +#define LRC_WA_BB_SIZE SZ_4K static struct xe_device * lrc_to_xe(struct xe_lrc *lrc) @@ -654,8 +655,8 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc) #define LRC_SEQNO_PPHWSP_OFFSET 512 #define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8) #define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8) +#define LRC_ENGINE_ID_PPHWSP_OFFSET 1024 #define LRC_PARALLEL_PPHWSP_OFFSET 2048 -#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096 u32 xe_lrc_regs_offset(struct xe_lrc *lrc) { @@ -910,13 +911,11 @@ static void xe_lrc_finish(struct xe_lrc *lrc) { xe_hw_fence_ctx_finish(&lrc->fence_ctx); xe_bo_unpin_map_no_vm(lrc->bo); - xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo); } /* - * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active - * context run ticks. - * @lrc: Pointer to the lrc. + * wa_bb_setup_utilization() - Write commands to wa bb to assist + * in calculating active context run ticks. * * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the * context, but only gets updated when the context switches out. In order to @@ -941,18 +940,13 @@ static void xe_lrc_finish(struct xe_lrc *lrc) * store it in the PPHSWP. 
*/ #define CONTEXT_ACTIVE 1ULL -static int xe_lrc_setup_utilization(struct xe_lrc *lrc) +static ssize_t wa_bb_setup_utilization(struct xe_lrc *lrc, struct xe_hw_engine *hwe, + u32 *batch, size_t max_len) { - u32 *cmd, *buf = NULL; + u32 *cmd = batch; - if (lrc->bb_per_ctx_bo->vmap.is_iomem) { - buf = kmalloc(lrc->bb_per_ctx_bo->size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - cmd = buf; - } else { - cmd = lrc->bb_per_ctx_bo->vmap.vaddr; - } + if (xe_gt_WARN_ON(lrc->gt, max_len < 12)) + return -ENOSPC; *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET; *cmd++ = ENGINE_ID(0).addr; @@ -971,18 +965,71 @@ static int xe_lrc_setup_utilization(struct xe_lrc *lrc) *cmd++ = upper_32_bits(CONTEXT_ACTIVE); } + return cmd - batch; +} + +struct wa_bb_setup { + ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe, + u32 *batch, size_t max_size); +}; + +static size_t wa_bb_offset(struct xe_lrc *lrc) +{ + return lrc->bo->size - LRC_WA_BB_SIZE; +} + +static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) +{ + const size_t max_size = LRC_WA_BB_SIZE; + static const struct wa_bb_setup funcs[] = { + { .setup = wa_bb_setup_utilization }, + }; + ssize_t remain; + u32 *cmd, *buf = NULL; + + if (lrc->bo->vmap.is_iomem) { + buf = kmalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + cmd = buf; + } else { + cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc); + } + + remain = max_size / sizeof(*cmd); + + for (size_t i = 0; i < ARRAY_SIZE(funcs); i++) { + ssize_t len = funcs[i].setup(lrc, hwe, cmd, remain); + + remain -= len; + + /* + * There should always be at least 1 additional dword for + * the end marker + */ + if (len < 0 || xe_gt_WARN_ON(lrc->gt, remain < 1)) + goto fail; + + cmd += len; + } + *cmd++ = MI_BATCH_BUFFER_END; if (buf) { - xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0, - buf, (cmd - buf) * sizeof(*cmd)); + xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap, + wa_bb_offset(lrc), buf, + (cmd - buf) * sizeof(*cmd)); kfree(buf); } - xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, - xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1); + xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) + + wa_bb_offset(lrc) + 1); return 0; + +fail: + kfree(buf); + return -ENOSPC; } #define PVC_CTX_ASID (0x2e + 1) @@ -1018,20 +1065,13 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address * via VM bind calls. 
*/ - lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, lrc_size, + lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, + lrc_size + LRC_WA_BB_SIZE, ttm_bo_type_kernel, bo_flags); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); - lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, - ttm_bo_type_kernel, - bo_flags); - if (IS_ERR(lrc->bb_per_ctx_bo)) { - err = PTR_ERR(lrc->bb_per_ctx_bo); - goto err_lrc_finish; - } - lrc->size = lrc_size; lrc->ring.size = ring_size; lrc->ring.tail = 0; @@ -1139,7 +1179,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, map = __xe_lrc_start_seqno_map(lrc); xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1); - err = xe_lrc_setup_utilization(lrc); + err = setup_wa_bb(lrc, hwe); if (err) goto err_lrc_finish; @@ -1819,7 +1859,8 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) snapshot->seqno = xe_lrc_seqno(lrc); snapshot->lrc_bo = xe_bo_get(lrc->bo); snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc); - snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset; + snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset - + LRC_WA_BB_SIZE; snapshot->lrc_snapshot = NULL; snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc)); snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc); diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index ae24cf6f8dd9..883e550a9423 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -53,9 +53,6 @@ struct xe_lrc { /** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */ u64 ctx_timestamp; - - /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */ - struct xe_bo *bb_per_ctx_bo; }; struct xe_lrc_snapshot; diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h index f62e0c8b67ab..8d67f6ba2d95 100644 --- a/drivers/gpu/drm/xe/xe_map.h +++ b/drivers/gpu/drm/xe/xe_map.h @@ -78,6 +78,24 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map, iosys_map_wr(map__, offset__, type__, val__); \ }) +#define xe_map_rd_array(xe__, map__, index__, type__) \ + xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__) + +#define xe_map_wr_array(xe__, map__, index__, type__, val__) \ + xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__) + +#define xe_map_rd_array_u32(xe__, map__, index__) \ + xe_map_rd_array(xe__, map__, index__, u32) + +#define xe_map_wr_array_u32(xe__, map__, index__, val__) \ + xe_map_wr_array(xe__, map__, index__, u32, val__) + +#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \ + xe_map_rd_array_u32(xe__, map__, (index__) % (size__)) + +#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \ + xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__) + #define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \ struct xe_device *__xe = xe__; \ xe_device_assert_mem_access(__xe); \ diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index e4742e27e2cd..e332f3142435 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -18,9 +18,15 @@ #include "xe_observation.h" #include "xe_sched_job.h" +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +#define DEFAULT_GUC_LOG_LEVEL 3 +#else +#define DEFAULT_GUC_LOG_LEVEL 1 +#endif + struct xe_modparam xe_modparam = { .probe_display = true, - .guc_log_level = 3, + .guc_log_level = DEFAULT_GUC_LOG_LEVEL, .force_probe = CONFIG_DRM_XE_FORCE_PROBE, .wedged_mode = 1, .svm_notifier_size = 512, 
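The xe_lrc.c hunks above drop the separate bb_per_ctx_bo and instead allocate the LRC BO LRC_WA_BB_SIZE larger, placing the per-context workaround batch at bo->size - LRC_WA_BB_SIZE and programming CTX_BB_PER_CTX_PTR with that GGTT address plus the low enable bit. A simplified sketch of the layout arithmetic, with a toy structure standing in for the real BO and GGTT helpers:

#include <assert.h>
#include <stddef.h>

#define SZ_4K		4096u
#define LRC_WA_BB_SIZE	SZ_4K	/* trailing per-context WA batch, as in the hunk */

/* Toy stand-in for the pinned LRC BO: just a GGTT address and a size. */
struct toy_lrc_bo {
	unsigned long ggtt_addr;
	size_t size;		/* lrc_size + LRC_WA_BB_SIZE */
};

static size_t wa_bb_offset(const struct toy_lrc_bo *bo)
{
	return bo->size - LRC_WA_BB_SIZE;
}

/*
 * Value written to CTX_BB_PER_CTX_PTR: the WA BB GGTT address with the
 * low enable bit set, mirroring "xe_bo_ggtt_addr(lrc->bo) +
 * wa_bb_offset(lrc) + 1" in the hunk above.
 */
static unsigned long bb_per_ctx_ptr(const struct toy_lrc_bo *bo)
{
	return bo->ggtt_addr + wa_bb_offset(bo) + 1;
}

int main(void)
{
	struct toy_lrc_bo bo = { .ggtt_addr = 0x100000, .size = 16 * SZ_4K };

	assert(wa_bb_offset(&bo) == 15 * SZ_4K);
	assert(bb_per_ctx_ptr(&bo) == 0x100000 + 15 * SZ_4K + 1);
	return 0;
}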
@@ -40,7 +46,8 @@ module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600); MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size"); module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600); -MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)"); +MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels " + "[default=" __stringify(DEFAULT_GUC_LOG_LEVEL) "])"); module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400); MODULE_PARM_DESC(guc_firmware_path, diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index fb842fa0552e..4829ed46a8b4 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -43,6 +43,12 @@ #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) #define XE_OA_UNIT_INVALID U32_MAX +enum xe_oam_unit_type { + XE_OAM_UNIT_SAG, + XE_OAM_UNIT_SCMI_0, + XE_OAM_UNIT_SCMI_1, +}; + enum xe_oa_submit_deps { XE_OA_SUBMIT_NO_DEPS, XE_OA_SUBMIT_ADD_DEPS, @@ -77,7 +83,7 @@ struct xe_oa_config { struct xe_oa_open_param { struct xe_file *xef; - u32 oa_unit_id; + struct xe_oa_unit *oa_unit; bool sample; u32 metric_set; enum xe_oa_format_name oa_format; @@ -194,7 +200,7 @@ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *l static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream) { - return &stream->hwe->oa_unit->regs; + return &stream->oa_unit->regs; } static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream) @@ -454,7 +460,7 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream) static u32 __oactrl_used_bits(struct xe_oa_stream *stream) { - return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ? + return stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ? OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS; } @@ -475,7 +481,7 @@ static void xe_oa_enable(struct xe_oa_stream *stream) __oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE; if (GRAPHICS_VER(stream->oa->xe) >= 20 && - stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) + stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) val |= OAG_OACONTROL_OA_PES_DISAG_EN; xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val); @@ -838,11 +844,16 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) /* Reset PMON Enable to save power. */ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0); + + if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM || + stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) && + GRAPHICS_VER(stream->oa->xe) >= 30) + xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, OAM_LAT_MEASURE_ENABLE, 0); } static void xe_oa_stream_destroy(struct xe_oa_stream *stream) { - struct xe_oa_unit *u = stream->hwe->oa_unit; + struct xe_oa_unit *u = stream->oa_unit; struct xe_gt *gt = stream->hwe->gt; if (WARN_ON(stream != u->exclusive_stream)) @@ -1105,9 +1116,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) */ sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? 
SQCNT1_OABPC : 0); - xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1); + if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM || + stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) && + GRAPHICS_VER(stream->oa->xe) >= 30) + xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, 0, OAM_LAT_MEASURE_ENABLE); + /* Configure OAR/OAC */ if (stream->exec_q) { ret = xe_oa_configure_oa_context(stream, true); @@ -1139,14 +1154,31 @@ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *n return -EINVAL; } +static struct xe_oa_unit *xe_oa_lookup_oa_unit(struct xe_oa *oa, u32 oa_unit_id) +{ + struct xe_gt *gt; + int gt_id, i; + + for_each_gt(gt, oa->xe, gt_id) { + for (i = 0; i < gt->oa.num_oa_units; i++) { + struct xe_oa_unit *u = >->oa.oa_unit[i]; + + if (u->oa_unit_id == oa_unit_id) + return u; + } + } + + return NULL; +} + static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, struct xe_oa_open_param *param) { - if (value >= oa->oa_unit_ids) { + param->oa_unit = xe_oa_lookup_oa_unit(oa, value); + if (!param->oa_unit) { drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); return -EINVAL; } - param->oa_unit_id = value; return 0; } @@ -1677,13 +1709,13 @@ static const struct file_operations xe_oa_fops = { static int xe_oa_stream_init(struct xe_oa_stream *stream, struct xe_oa_open_param *param) { - struct xe_oa_unit *u = param->hwe->oa_unit; struct xe_gt *gt = param->hwe->gt; unsigned int fw_ref; int ret; stream->exec_q = param->exec_q; stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS; + stream->oa_unit = param->oa_unit; stream->hwe = param->hwe; stream->gt = stream->hwe->gt; stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format]; @@ -1704,7 +1736,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, * buffer whose size, circ_size, is a multiple of the report size */ if (GRAPHICS_VER(stream->oa->xe) >= 20 && - stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample) + stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample) stream->oa_buffer.circ_size = param->oa_buffer_size - param->oa_buffer_size % stream->oa_buffer.format->size; @@ -1762,7 +1794,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n", stream->oa_config->uuid); - WRITE_ONCE(u->exclusive_stream, stream); + WRITE_ONCE(stream->oa_unit->exclusive_stream, stream); hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1798,7 +1830,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa, int ret; /* We currently only allow exclusive access */ - if (param->hwe->oa_unit->exclusive_stream) { + if (param->oa_unit->exclusive_stream) { drm_dbg(&oa->xe->drm, "OA unit already in use\n"); ret = -EBUSY; goto exit; @@ -1874,13 +1906,14 @@ static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent) return div_u64(nom + den - 1, den); } -static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type) +static bool oa_unit_supports_oa_format(struct xe_oa_open_param *param, int type) { - switch (hwe->oa_unit->type) { + switch (param->oa_unit->type) { case DRM_XE_OA_UNIT_TYPE_OAG: return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR || type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC; case DRM_XE_OA_UNIT_TYPE_OAM: + case DRM_XE_OA_UNIT_TYPE_OAM_SAG: return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC; default: return false; @@ -1899,37 +1932,48 @@ u16 
xe_oa_unit_id(struct xe_hw_engine *hwe) hwe->oa_unit->oa_unit_id : U16_MAX; } +/* A hwe must be assigned to stream/oa_unit for batch submissions */ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param) { - struct xe_gt *gt; - int i, ret = 0; + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + int ret = 0; + + /* If not provided, OA unit defaults to OA unit 0 as per uapi */ + if (!param->oa_unit) + param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0]; + /* When we have an exec_q, get hwe from the exec_q */ if (param->exec_q) { - /* When we have an exec_q, get hwe from the exec_q */ param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class, param->engine_instance, true); - } else { - struct xe_hw_engine *hwe; - enum xe_hw_engine_id id; - - /* Else just get the first hwe attached to the oa unit */ - for_each_gt(gt, oa->xe, i) { - for_each_hw_engine(hwe, gt, id) { - if (xe_oa_unit_id(hwe) == param->oa_unit_id) { - param->hwe = hwe; - goto out; - } - } - } + if (!param->hwe || param->hwe->oa_unit != param->oa_unit) + goto err; + goto out; } -out: - if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) { - drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n", - param->exec_q ? param->exec_q->class : -1, - param->engine_instance, param->oa_unit_id); - ret = -EINVAL; + + /* Else just get the first hwe attached to the oa unit */ + for_each_hw_engine(hwe, param->oa_unit->gt, id) { + if (hwe->oa_unit == param->oa_unit) { + param->hwe = hwe; + goto out; + } } + /* If we still didn't find a hwe, just get one with a valid oa_unit from the same gt */ + for_each_hw_engine(hwe, param->oa_unit->gt, id) { + if (!hwe->oa_unit) + continue; + + param->hwe = hwe; + goto out; + } +err: + drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n", + param->exec_q ? param->exec_q->class : -1, + param->engine_instance, param->oa_unit->oa_unit_id); + ret = -EINVAL; +out: return ret; } @@ -2007,7 +2051,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f f = &oa->oa_formats[param.oa_format]; if (!param.oa_format || !f->size || - !engine_supports_oa_format(param.hwe, f->type)) { + !oa_unit_supports_oa_format(¶m, f->type)) { drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n", param.oa_format, f->type, f->size, param.hwe->class); ret = -EINVAL; @@ -2155,6 +2199,7 @@ static const struct xe_mmio_range gen12_oa_mux_regs[] = { static const struct xe_mmio_range xe2_oa_mux_regs[] = { { .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */ { .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */ + { .start = 0xB01C, .end = 0xB01C }, /* LNCF_MISC_CONFIG_REGISTER0 */ { .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */ { .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */ @@ -2448,20 +2493,38 @@ int xe_oa_register(struct xe_device *xe) static u32 num_oa_units_per_gt(struct xe_gt *gt) { - return 1; + if (!xe_gt_is_media_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20) + return 1; + else if (!IS_DGFX(gt_to_xe(gt))) + return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */ + else + return XE_OAM_UNIT_SCMI_1 + 1; /* SAG + SCMI_0 + SCMI_1 */ } static u32 __hwe_oam_unit(struct xe_hw_engine *hwe) { - if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) { - /* - * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices - * within the gt use the same OAM. 
All MTL/LNL SKUs list 1 SA MEDIA - */ - xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA); + if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270) + return XE_OA_UNIT_INVALID; + + xe_gt_WARN_ON(hwe->gt, !xe_gt_is_media_type(hwe->gt)); + if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20) return 0; - } + /* + * XE_OAM_UNIT_SAG has only GSCCS attached to it, but only on some platforms. Also + * GSCCS cannot be used to submit batches to program the OAM unit. Therefore we don't + * assign an OA unit to GSCCS. This means that XE_OAM_UNIT_SAG is exposed as an OA + * unit without attached engines. Fused off engines can also result in oa_unit's with + * num_engines == 0. OA streams can be opened on all OA units. + */ + else if (hwe->engine_id == XE_HW_ENGINE_GSCCS0) + return XE_OA_UNIT_INVALID; + else if (!IS_DGFX(gt_to_xe(hwe->gt))) + return XE_OAM_UNIT_SCMI_0; + else if (hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE) + return (hwe->instance / 2 & 0x1) + 1; + else if (hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE) + return (hwe->instance & 0x1) + 1; return XE_OA_UNIT_INVALID; } @@ -2475,6 +2538,7 @@ static u32 __hwe_oa_unit(struct xe_hw_engine *hwe) case XE_ENGINE_CLASS_VIDEO_DECODE: case XE_ENGINE_CLASS_VIDEO_ENHANCE: + case XE_ENGINE_CLASS_OTHER: return __hwe_oam_unit(hwe); default: @@ -2514,20 +2578,29 @@ static struct xe_oa_regs __oag_regs(void) static void __xe_oa_init_oa_units(struct xe_gt *gt) { - const u32 mtl_oa_base[] = { 0x13000 }; + /* Actual address is MEDIA_GT_GSI_OFFSET + oam_base_addr[i] */ + const u32 oam_base_addr[] = { + [XE_OAM_UNIT_SAG] = 0x13000, + [XE_OAM_UNIT_SCMI_0] = 0x14000, + [XE_OAM_UNIT_SCMI_1] = 0x14800, + }; int i, num_units = gt->oa.num_oa_units; for (i = 0; i < num_units; i++) { struct xe_oa_unit *u = >->oa.oa_unit[i]; - if (gt->info.type != XE_GT_TYPE_MEDIA) { + if (!xe_gt_is_media_type(gt)) { u->regs = __oag_regs(); u->type = DRM_XE_OA_UNIT_TYPE_OAG; - } else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { - u->regs = __oam_regs(mtl_oa_base[i]); - u->type = DRM_XE_OA_UNIT_TYPE_OAM; + } else { + xe_gt_assert(gt, GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270); + u->regs = __oam_regs(oam_base_addr[i]); + u->type = i == XE_OAM_UNIT_SAG && GRAPHICS_VER(gt_to_xe(gt)) >= 20 ? + DRM_XE_OA_UNIT_TYPE_OAM_SAG : DRM_XE_OA_UNIT_TYPE_OAM; } + u->gt = gt; + xe_mmio_write32(>->mmio, u->regs.oa_ctrl, 0); /* Ensure MMIO trigger remains disabled till there is a stream */ @@ -2560,10 +2633,6 @@ static int xe_oa_init_gt(struct xe_gt *gt) } } - /* - * Fused off engines can result in oa_unit's with num_engines == 0. These units - * will appear in OA unit query, but no OA streams can be opened on them. 
- */ gt->oa.num_oa_units = num_oa_units; gt->oa.oa_unit = u; @@ -2574,17 +2643,54 @@ static int xe_oa_init_gt(struct xe_gt *gt) return 0; } +static void xe_oa_print_gt_oa_units(struct xe_gt *gt) +{ + enum xe_hw_engine_id hwe_id; + struct xe_hw_engine *hwe; + struct xe_oa_unit *u; + char buf[256]; + int i, n; + + for (i = 0; i < gt->oa.num_oa_units; i++) { + u = >->oa.oa_unit[i]; + buf[0] = '\0'; + n = 0; + + for_each_hw_engine(hwe, gt, hwe_id) + if (xe_oa_unit_id(hwe) == u->oa_unit_id) + n += scnprintf(buf + n, sizeof(buf) - n, "%s ", hwe->name); + + xe_gt_dbg(gt, "oa_unit %d, type %d, Engines: %s\n", u->oa_unit_id, u->type, buf); + } +} + +static void xe_oa_print_oa_units(struct xe_oa *oa) +{ + struct xe_gt *gt; + int gt_id; + + for_each_gt(gt, oa->xe, gt_id) + xe_oa_print_gt_oa_units(gt); +} + static int xe_oa_init_oa_units(struct xe_oa *oa) { struct xe_gt *gt; int i, ret; + /* Needed for OAM implementation here */ + BUILD_BUG_ON(XE_OAM_UNIT_SAG != 0); + BUILD_BUG_ON(XE_OAM_UNIT_SCMI_0 != 1); + BUILD_BUG_ON(XE_OAM_UNIT_SCMI_1 != 2); + for_each_gt(gt, oa->xe, i) { ret = xe_oa_init_gt(gt); if (ret) return ret; } + xe_oa_print_oa_units(oa); + return 0; } diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 52e33c37d5ee..2628f78c4e8d 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -95,6 +95,9 @@ struct xe_oa_unit { /** @oa_unit_id: identifier for the OA unit */ u16 oa_unit_id; + /** @gt: gt associated with the OA unit */ + struct xe_gt *gt; + /** @type: Type of OA unit - OAM, OAG etc. */ enum drm_xe_oa_unit_type type; @@ -182,6 +185,9 @@ struct xe_oa_stream { /** @gt: gt associated with the oa stream */ struct xe_gt *gt; + /** @oa_unit: oa unit for this stream */ + struct xe_oa_unit *oa_unit; + /** @hwe: hardware engine associated with this oa stream */ struct xe_hw_engine *hwe; diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index 30fdbdb9341e..2e7cb99ae87a 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -103,7 +103,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = { * * Note: There is an implicit assumption in the driver that compression and * coh_1way+ are mutually exclusive. If this is ever not true then userptr - * and imported dma-buf from external device will have uncleared ccs state. + * and imported dma-buf from external device will have uncleared ccs state. See + * also xe_bo_needs_ccs_pages(). 
*/ #define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \ { \ @@ -162,21 +163,35 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index) static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[], int n_entries) { + struct xe_device *xe = gt_to_xe(gt); + for (int i = 0; i < n_entries; i++) { struct xe_reg reg = XE_REG(_PAT_INDEX(i)); xe_mmio_write32(>->mmio, reg, table[i].value); } + + if (xe->pat.pat_ats) + xe_mmio_write32(>->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value); + if (xe->pat.pat_pta) + xe_mmio_write32(>->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value); } static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[], int n_entries) { + struct xe_device *xe = gt_to_xe(gt); + for (int i = 0; i < n_entries; i++) { struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i)); xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value); } + + if (xe->pat.pat_ats) + xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value); + if (xe->pat.pat_pta) + xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value); } static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) @@ -303,26 +318,6 @@ static const struct xe_pat_ops xelpg_pat_ops = { .dump = xelpg_dump, }; -static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[], - int n_entries) -{ - program_pat_mcr(gt, table, n_entries); - xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value); - - if (IS_DGFX(gt_to_xe(gt))) - xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value); -} - -static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[], - int n_entries) -{ - program_pat(gt, table, n_entries); - xe_mmio_write32(>->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value); - - if (IS_DGFX(gt_to_xe(gt))) - xe_mmio_write32(>->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value); -} - static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); @@ -375,8 +370,8 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) } static const struct xe_pat_ops xe2_pat_ops = { - .program_graphics = xe2lpg_program_pat, - .program_media = xe2lpm_program_pat, + .program_graphics = program_pat_mcr, + .program_media = program_pat, .dump = xe2_dump, }; @@ -385,6 +380,9 @@ void xe_pat_init_early(struct xe_device *xe) if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) { xe->pat.ops = &xe2_pat_ops; xe->pat.table = xe2_pat_table; + xe->pat.pat_ats = &xe2_pat_ats; + if (IS_DGFX(xe)) + xe->pat.pat_pta = &xe2_pat_pta; /* Wa_16023588340. 
XXX: Should use XE_WA */ if (GRAPHICS_VERx100(xe) == 2001) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index ac4beaed58ff..89814b32e585 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -180,6 +180,7 @@ static const struct xe_ip graphics_ips[] = { { 1271, "Xe_LPG", &graphics_xelpg }, { 1274, "Xe_LPG+", &graphics_xelpg }, { 2001, "Xe2_HPG", &graphics_xe2 }, + { 2002, "Xe2_HPG", &graphics_xe2 }, { 2004, "Xe2_LPG", &graphics_xe2 }, { 3000, "Xe3_LPG", &graphics_xe2 }, { 3001, "Xe3_LPG", &graphics_xe2 }, diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index b04756a97cdc..c8e63bd23300 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -907,6 +907,11 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma) struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id]; u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated); + if (xe_vma_bo(vma)) + xe_bo_assert_held(xe_vma_bo(vma)); + else if (xe_vma_is_userptr(vma)) + lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock); + if (!(pt_mask & BIT(tile->id))) return false; @@ -1458,6 +1463,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update) struct xe_vm *vm = pt_update->vops->vm; struct xe_vma_ops *vops = pt_update->vops; struct xe_vma_op *op; + unsigned long i; int err; err = xe_pt_pre_commit(pt_update); @@ -1467,20 +1473,35 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update) xe_svm_notifier_lock(vm); list_for_each_entry(op, &vops->list, link) { - struct xe_svm_range *range = op->map_range.range; + struct xe_svm_range *range = NULL; if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) continue; - xe_svm_range_debug(range, "PRE-COMMIT"); + if (op->base.op == DRM_GPUVA_OP_PREFETCH) { + xe_assert(vm->xe, + xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))); + xa_for_each(&op->prefetch_range.range, i, range) { + xe_svm_range_debug(range, "PRE-COMMIT"); - xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); - xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE); + if (!xe_svm_range_pages_valid(range)) { + xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); + xe_svm_notifier_unlock(vm); + return -ENODATA; + } + } + } else { + xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); + xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE); + range = op->map_range.range; - if (!xe_svm_range_pages_valid(range)) { - xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); - xe_svm_notifier_unlock(vm); - return -EAGAIN; + xe_svm_range_debug(range, "PRE-COMMIT"); + + if (!xe_svm_range_pages_valid(range)) { + xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); + xe_svm_notifier_unlock(vm); + return -EAGAIN; + } } } @@ -1974,6 +1995,32 @@ static int unbind_op_prepare(struct xe_tile *tile, return 0; } +static bool +xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op, + struct xe_svm_range *range) +{ + struct xe_vm_pgtable_update *update = pt_op->entries; + + XE_WARN_ON(!pt_op->num_entries); + + /* + * We can't skip the invalidation if we are removing PTEs that span more + * than the range, do some checks to ensure we are removing PTEs that + * are invalid. 
+ */ + + if (pt_op->num_entries > 1) + return false; + + if (update->pt->level == 0) + return true; + + if (update->pt->level == 1) + return xe_svm_range_size(range) >= SZ_2M; + + return false; +} + static int unbind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, struct xe_vm_pgtable_update_ops *pt_update_ops, @@ -2002,7 +2049,10 @@ static int unbind_range_prepare(struct xe_vm *vm, range->base.itree.last + 1); ++pt_update_ops->current_op; pt_update_ops->needs_svm_lock = true; - pt_update_ops->needs_invalidation = true; + pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) || + xe_vm_has_valid_gpu_mapping(tile, range->tile_present, + range->tile_invalidated) || + !xe_pt_op_check_range_skip_invalidation(pt_op, range); xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries, pt_op->num_entries); @@ -2065,11 +2115,20 @@ static int op_prepare(struct xe_vm *vm, { struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); - if (xe_vma_is_cpu_addr_mirror(vma)) - break; + if (xe_vma_is_cpu_addr_mirror(vma)) { + struct xe_svm_range *range; + unsigned long i; - err = bind_op_prepare(vm, tile, pt_update_ops, vma, false); - pt_update_ops->wait_vm_kernel = true; + xa_for_each(&op->prefetch_range.range, i, range) { + err = bind_range_prepare(vm, tile, pt_update_ops, + vma, range); + if (err) + return err; + } + } else { + err = bind_op_prepare(vm, tile, pt_update_ops, vma, false); + pt_update_ops->wait_vm_kernel = true; + } break; } case DRM_GPUVA_OP_DRIVER: @@ -2166,10 +2225,15 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_BOOKKEEP); } - vma->tile_present |= BIT(tile->id); - vma->tile_staged &= ~BIT(tile->id); + /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ + WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id)); if (invalidate_on_bind) - vma->tile_invalidated |= BIT(tile->id); + WRITE_ONCE(vma->tile_invalidated, + vma->tile_invalidated | BIT(tile->id)); + else + WRITE_ONCE(vma->tile_invalidated, + vma->tile_invalidated & ~BIT(tile->id)); + vma->tile_staged &= ~BIT(tile->id); if (xe_vma_is_userptr(vma)) { lockdep_assert_held_read(&vm->userptr.notifier_lock); to_userptr_vma(vma)->userptr.initial_bind = true; @@ -2216,6 +2280,18 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile, } } +static void range_present_and_invalidated_tile(struct xe_vm *vm, + struct xe_svm_range *range, + u8 tile_id) +{ + /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ + + lockdep_assert_held(&vm->svm.gpusvm.notifier_lock); + + WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id)); + WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id)); +} + static void op_commit(struct xe_vm *vm, struct xe_tile *tile, struct xe_vm_pgtable_update_ops *pt_update_ops, @@ -2263,27 +2339,28 @@ static void op_commit(struct xe_vm *vm, { struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); - if (!xe_vma_is_cpu_addr_mirror(vma)) + if (xe_vma_is_cpu_addr_mirror(vma)) { + struct xe_svm_range *range = NULL; + unsigned long i; + + xa_for_each(&op->prefetch_range.range, i, range) + range_present_and_invalidated_tile(vm, range, tile->id); + } else { bind_op_commit(vm, tile, pt_update_ops, vma, fence, fence2, false); + } break; } case DRM_GPUVA_OP_DRIVER: { - /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */ - - if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { - WRITE_ONCE(op->map_range.range->tile_present, - op->map_range.range->tile_present | - BIT(tile->id)); - 
WRITE_ONCE(op->map_range.range->tile_invalidated, - op->map_range.range->tile_invalidated & - ~BIT(tile->id)); - } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) { + /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ + if (op->subop == XE_VMA_SUBOP_MAP_RANGE) + range_present_and_invalidated_tile(vm, op->map_range.range, tile->id); + else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) WRITE_ONCE(op->unmap_range.range->tile_present, op->unmap_range.range->tile_present & ~BIT(tile->id)); - } + break; } default: @@ -2476,7 +2553,7 @@ free_ifence: kfree(mfence); kfree(ifence); kill_vm_tile1: - if (err != -EAGAIN && tile->id) + if (err != -EAGAIN && err != -ENODATA && tile->id) xe_vm_kill(vops->vm, false); return ERR_PTR(err); diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c index b5bc15f436fa..3d62008c99f1 100644 --- a/drivers/gpu/drm/xe/xe_pxp.c +++ b/drivers/gpu/drm/xe/xe_pxp.c @@ -504,69 +504,62 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t return 0; } -static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) +static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) { - spin_lock_irq(&pxp->queues.lock); - list_add_tail(&q->pxp.link, &pxp->queues.list); - spin_unlock_irq(&pxp->queues.lock); + int ret = 0; + + /* + * A queue can be added to the list only if the PXP is in active status, + * otherwise the termination might not handle it correctly. + */ + mutex_lock(&pxp->mutex); + + if (pxp->status == XE_PXP_ACTIVE) { + spin_lock_irq(&pxp->queues.lock); + list_add_tail(&q->pxp.link, &pxp->queues.list); + spin_unlock_irq(&pxp->queues.lock); + } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) { + ret = -EIO; + } else { + ret = -EBUSY; /* try again later */ + } + + mutex_unlock(&pxp->mutex); + + return ret; } -/** - * xe_pxp_exec_queue_add - add a queue to the PXP list - * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) - * @q: the queue to add to the list - * - * If PXP is enabled and the prerequisites are done, start the PXP ARB - * session (if not already running) and add the queue to the PXP list. Note - * that the queue must have previously been marked as using PXP with - * xe_pxp_exec_queue_set_type. - * - * Returns 0 if the PXP ARB session is running and the queue is in the list, - * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done, - * other errno value if something goes wrong during the session start. - */ -int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) +static int pxp_start(struct xe_pxp *pxp, u8 type) { int ret = 0; + bool restart = false; if (!xe_pxp_is_enabled(pxp)) return -ENODEV; /* we only support HWDRM sessions right now */ - xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM); - - /* - * Runtime suspend kills PXP, so we take a reference to prevent it from - * happening while we have active queues that use PXP - */ - xe_pm_runtime_get(pxp->xe); + xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM); /* get_readiness_status() returns 0 for in-progress and 1 for done */ ret = xe_pxp_get_readiness_status(pxp); - if (ret <= 0) { - if (!ret) - ret = -EBUSY; - goto out; - } + if (ret <= 0) + return ret ?: -EBUSY; + ret = 0; wait_for_idle: /* * if there is an action in progress, wait for it. We need to wait * outside the lock because the completion is done from within the lock. - * Note that the two action should never be pending at the same time. 
+ * Note that the two actions should never be pending at the same time. */ if (!wait_for_completion_timeout(&pxp->termination, - msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) { - ret = -ETIMEDOUT; - goto out; - } + msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) + return -ETIMEDOUT; if (!wait_for_completion_timeout(&pxp->activation, - msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) { - ret = -ETIMEDOUT; - goto out; - } + msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) + return -ETIMEDOUT; mutex_lock(&pxp->mutex); @@ -574,11 +567,9 @@ wait_for_idle: switch (pxp->status) { case XE_PXP_ERROR: ret = -EIO; - break; + goto out_unlock; case XE_PXP_ACTIVE: - __exec_queue_add(pxp, q); - mutex_unlock(&pxp->mutex); - goto out; + goto out_unlock; case XE_PXP_READY_TO_START: pxp->status = XE_PXP_START_IN_PROGRESS; reinit_completion(&pxp->activation); @@ -586,8 +577,8 @@ wait_for_idle: case XE_PXP_START_IN_PROGRESS: /* If a start is in progress then the completion must not be done */ XE_WARN_ON(completion_done(&pxp->activation)); - mutex_unlock(&pxp->mutex); - goto wait_for_idle; + restart = true; + goto out_unlock; case XE_PXP_NEEDS_TERMINATION: mark_termination_in_progress(pxp); break; @@ -595,29 +586,25 @@ wait_for_idle: case XE_PXP_NEEDS_ADDITIONAL_TERMINATION: /* If a termination is in progress then the completion must not be done */ XE_WARN_ON(completion_done(&pxp->termination)); - mutex_unlock(&pxp->mutex); - goto wait_for_idle; + restart = true; + goto out_unlock; case XE_PXP_SUSPENDED: default: drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status); ret = -EIO; - break; + goto out_unlock; } mutex_unlock(&pxp->mutex); - if (ret) - goto out; - if (!completion_done(&pxp->termination)) { ret = pxp_terminate_hw(pxp); if (ret) { drm_err(&pxp->xe->drm, "PXP termination failed before start\n"); mutex_lock(&pxp->mutex); pxp->status = XE_PXP_ERROR; - mutex_unlock(&pxp->mutex); - goto out; + goto out_unlock; } goto wait_for_idle; @@ -639,21 +626,59 @@ wait_for_idle: if (pxp->status != XE_PXP_START_IN_PROGRESS) { drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status); pxp->status = XE_PXP_NEEDS_TERMINATION; - mutex_unlock(&pxp->mutex); - goto wait_for_idle; + restart = true; + goto out_unlock; } /* If everything went ok, update the status and add the queue to the list */ - if (!ret) { + if (!ret) pxp->status = XE_PXP_ACTIVE; - __exec_queue_add(pxp, q); - } else { + else pxp->status = XE_PXP_ERROR; - } +out_unlock: mutex_unlock(&pxp->mutex); -out: + if (restart) + goto wait_for_idle; + + return ret; +} + +/** + * xe_pxp_exec_queue_add - add a queue to the PXP list + * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled) + * @q: the queue to add to the list + * + * If PXP is enabled and the prerequisites are done, start the PXP default + * session (if not already running) and add the queue to the PXP list. + * + * Returns 0 if the PXP session is running and the queue is in the list, + * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done, + * other errno value if something goes wrong during the session start. 
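With the refactor above, session bring-up lives in pxp_start() and queue insertion in __exec_queue_add(), which only succeeds while the session is ACTIVE and otherwise reports -EBUSY so the caller can restart the sequence. The standalone sketch below models that retry split; the enum values, helper names and the single simulated transition are invented for illustration and are not the driver's types.

#include <errno.h>
#include <stdio.h>

enum model_status { MODEL_NEEDS_TERMINATION, MODEL_ACTIVE, MODEL_ERROR };

/* stand-in for pxp_start(): terminate and start, leaving the session active */
static int model_start(enum model_status *st)
{
	if (*st == MODEL_ERROR)
		return -EIO;
	*st = MODEL_ACTIVE;
	return 0;
}

/* stand-in for __exec_queue_add(): only valid while the session is active */
static int model_add_queue(enum model_status *st)
{
	return *st == MODEL_ACTIVE ? 0 : -EBUSY;
}

int main(void)
{
	enum model_status st = MODEL_NEEDS_TERMINATION;
	int ret;

	do {
		ret = model_start(&st);
		if (!ret)
			ret = model_add_queue(&st);
	} while (ret == -EBUSY);	/* mirrors the "goto start" retry */

	printf("ret=%d\n", ret);
	return 0;
}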
+ */ +int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q) +{ + int ret; + + if (!xe_pxp_is_enabled(pxp)) + return -ENODEV; + + /* + * Runtime suspend kills PXP, so we take a reference to prevent it from + * happening while we have active queues that use PXP + */ + xe_pm_runtime_get(pxp->xe); + +start: + ret = pxp_start(pxp, q->pxp.type); + + if (!ret) { + ret = __exec_queue_add(pxp, q); + if (ret == -EBUSY) + goto start; + } + /* * in the successful case the PM ref is released from * xe_pxp_exec_queue_remove diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 2dbf4066d86f..e8e1743dcb1e 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -683,8 +683,8 @@ static int query_oa_units(struct xe_device *xe, du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS | DRM_XE_OA_CAPS_OA_BUFFER_SIZE | - DRM_XE_OA_CAPS_WAIT_NUM_REPORTS; - + DRM_XE_OA_CAPS_WAIT_NUM_REPORTS | + DRM_XE_OA_CAPS_OAM; j = 0; for_each_hw_engine(hwe, gt, hwe_id) { if (!xe_hw_engine_is_reserved(hwe) && diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 86d47aaf0358..125c836e0ee4 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -5,6 +5,7 @@ #include <linux/shrinker.h> +#include <drm/drm_managed.h> #include <drm/ttm/ttm_backup.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_tt.h> @@ -213,24 +214,34 @@ static void xe_shrinker_pm(struct work_struct *work) xe_pm_runtime_put(shrinker->xe); } +static void xe_shrinker_fini(struct drm_device *drm, void *arg) +{ + struct xe_shrinker *shrinker = arg; + + xe_assert(shrinker->xe, !shrinker->shrinkable_pages); + xe_assert(shrinker->xe, !shrinker->purgeable_pages); + shrinker_free(shrinker->shrink); + flush_work(&shrinker->pm_worker); + kfree(shrinker); +} + /** * xe_shrinker_create() - Create an xe per-device shrinker * @xe: Pointer to the xe device. * - * Returns: A pointer to the created shrinker on success, - * Negative error code on failure. + * Return: %0 on success. Negative error code on failure. */ -struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) +int xe_shrinker_create(struct xe_device *xe) { struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL); if (!shrinker) - return ERR_PTR(-ENOMEM); + return -ENOMEM; shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique); if (!shrinker->shrink) { kfree(shrinker); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm); @@ -240,19 +251,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) shrinker->shrink->scan_objects = xe_shrinker_scan; shrinker->shrink->private_data = shrinker; shrinker_register(shrinker->shrink); + xe->mem.shrinker = shrinker; - return shrinker; -} - -/** - * xe_shrinker_destroy() - Destroy an xe per-device shrinker - * @shrinker: Pointer to the shrinker to destroy. 
- */ -void xe_shrinker_destroy(struct xe_shrinker *shrinker) -{ - xe_assert(shrinker->xe, !shrinker->shrinkable_pages); - xe_assert(shrinker->xe, !shrinker->purgeable_pages); - shrinker_free(shrinker->shrink); - flush_work(&shrinker->pm_worker); - kfree(shrinker); + return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker); } diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h index 28a038f4fcbf..5132ae5192e1 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.h +++ b/drivers/gpu/drm/xe/xe_shrinker.h @@ -11,8 +11,6 @@ struct xe_device; void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable); -struct xe_shrinker *xe_shrinker_create(struct xe_device *xe); - -void xe_shrinker_destroy(struct xe_shrinker *shrinker); +int xe_shrinker_create(struct xe_device *xe); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c index c1275e64aa9c..6526fe450e55 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c @@ -7,12 +7,15 @@ #include "xe_assert.h" #include "xe_device.h" +#include "xe_gt.h" #include "xe_gt_sriov_printk.h" #include "xe_gt_sriov_vf.h" +#include "xe_guc_ct.h" #include "xe_pm.h" #include "xe_sriov.h" #include "xe_sriov_printk.h" #include "xe_sriov_vf.h" +#include "xe_tile_sriov_vf.h" /** * DOC: VF restore procedure in PF KMD and VF KMD @@ -121,6 +124,15 @@ * | | | */ +static bool vf_migration_supported(struct xe_device *xe) +{ + /* + * TODO: Add conditions to allow specific platforms, when they're + * supported at production quality. + */ + return IS_ENABLED(CONFIG_DRM_XE_DEBUG); +} + static void migration_worker_func(struct work_struct *w); /** @@ -130,6 +142,9 @@ static void migration_worker_func(struct work_struct *w); void xe_sriov_vf_init_early(struct xe_device *xe) { INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); + + if (!vf_migration_supported(xe)) + xe_sriov_info(xe, "migration not supported by this module version\n"); } /** @@ -157,6 +172,20 @@ static int vf_post_migration_requery_guc(struct xe_device *xe) return ret; } +static void vf_post_migration_fixup_ctb(struct xe_device *xe) +{ + struct xe_gt *gt; + unsigned int id; + + xe_assert(xe, IS_SRIOV_VF(xe)); + + for_each_gt(gt, xe, id) { + s32 shift = xe_gt_sriov_vf_ggtt_shift(gt); + + xe_guc_ct_fixup_messages_with_ggtt(>->uc.guc.ct, shift); + } +} + /* * vf_post_migration_imminent - Check if post-restore recovery is coming. * @xe: the &xe_device struct instance @@ -170,6 +199,25 @@ static bool vf_post_migration_imminent(struct xe_device *xe) work_pending(&xe->sriov.vf.migration.worker); } +static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe) +{ + bool need_fixups = false; + struct xe_tile *tile; + unsigned int id; + + for_each_tile(tile, xe, id) { + struct xe_gt *gt = tile->primary_gt; + s64 shift; + + shift = xe_gt_sriov_vf_ggtt_shift(gt); + if (shift) { + need_fixups = true; + xe_tile_sriov_vf_fixup_ggtt_nodes(tile, shift); + } + } + return need_fixups; +} + /* * Notify all GuCs about resource fixups apply finished. 
*/ @@ -191,6 +239,7 @@ skip: static void vf_post_migration_recovery(struct xe_device *xe) { + bool need_fixups; int err; drm_dbg(&xe->drm, "migration recovery in progress\n"); @@ -200,8 +249,17 @@ static void vf_post_migration_recovery(struct xe_device *xe) goto defer; if (unlikely(err)) goto fail; + if (!vf_migration_supported(xe)) { + xe_sriov_err(xe, "migration not supported by this module version\n"); + err = -ENOTRECOVERABLE; + goto fail; + } + need_fixups = vf_post_migration_fixup_ggtt_nodes(xe); /* FIXME: add the recovery steps */ + if (need_fixups) + vf_post_migration_fixup_ctb(xe); + vf_post_migration_notify_resfix_done(xe); xe_pm_runtime_put(xe); drm_notice(&xe->drm, "migration recovery ended\n"); diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index f0b167b3fb6a..26418e9bdff0 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -45,21 +45,6 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r) return gpusvm_to_vm(r->gpusvm); } -static unsigned long xe_svm_range_start(struct xe_svm_range *range) -{ - return drm_gpusvm_range_start(&range->base); -} - -static unsigned long xe_svm_range_end(struct xe_svm_range *range) -{ - return drm_gpusvm_range_end(&range->base); -} - -static unsigned long xe_svm_range_size(struct xe_svm_range *range) -{ - return drm_gpusvm_range_size(&range->base); -} - #define range_debug(r__, operaton__) \ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \ @@ -103,11 +88,6 @@ static void xe_svm_range_free(struct drm_gpusvm_range *range) kfree(range); } -static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r) -{ - return container_of(r, struct xe_svm_range, base); -} - static void xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, const struct mmu_notifier_range *mmu_range) @@ -161,7 +141,12 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, for_each_tile(tile, xe, id) if (xe_pt_zap_ptes_range(tile, vm, range)) { tile_mask |= BIT(id); - range->tile_invalidated |= BIT(id); + /* + * WRITE_ONCE pairs with READ_ONCE in + * xe_vm_has_valid_gpu_mapping() + */ + WRITE_ONCE(range->tile_invalidated, + range->tile_invalidated | BIT(id)); } return tile_mask; @@ -187,14 +172,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm, { struct xe_vm *vm = gpusvm_to_vm(gpusvm); struct xe_device *xe = vm->xe; - struct xe_tile *tile; struct drm_gpusvm_range *r, *first; - struct xe_gt_tlb_invalidation_fence - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE]; u64 adj_start = mmu_range->start, adj_end = mmu_range->end; u8 tile_mask = 0; - u8 id; - u32 fence_id = 0; long err; xe_svm_assert_in_notifier(vm); @@ -240,42 +220,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm, xe_device_wmb(xe); - for_each_tile(tile, xe, id) { - if (tile_mask & BIT(id)) { - int err; - - xe_gt_tlb_invalidation_fence_init(tile->primary_gt, - &fence[fence_id], true); - - err = xe_gt_tlb_invalidation_range(tile->primary_gt, - &fence[fence_id], - adj_start, - adj_end, - vm->usm.asid); - if (WARN_ON_ONCE(err < 0)) - goto wait; - ++fence_id; - - if (!tile->media_gt) - continue; - - xe_gt_tlb_invalidation_fence_init(tile->media_gt, - &fence[fence_id], true); - - err = xe_gt_tlb_invalidation_range(tile->media_gt, - &fence[fence_id], - adj_start, - adj_end, - vm->usm.asid); - if (WARN_ON_ONCE(err < 0)) - goto wait; - ++fence_id; - } - } - -wait: - for (id = 0; id < fence_id; ++id) - 
xe_gt_tlb_invalidation_fence_wait(&fence[id]); + err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask); + WARN_ON_ONCE(err); range_notifier_event_end: r = first; @@ -662,13 +608,72 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range, struct xe_tile *tile, bool devmem_only) { - /* - * Advisory only check whether the range currently has a valid mapping, - * READ_ONCE pairs with WRITE_ONCE in xe_pt.c - */ - return ((READ_ONCE(range->tile_present) & - ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) && - (!devmem_only || xe_svm_range_in_vram(range)); + return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present, + range->tile_invalidated) && + (!devmem_only || xe_svm_range_in_vram(range))); +} + +/** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM + * @vm: xe_vm pointer + * @range: Pointer to the SVM range structure + * + * The xe_svm_range_migrate_to_smem() checks range has pages in VRAM + * and migrates them to SMEM + */ +void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range) +{ + if (xe_svm_range_in_vram(range)) + drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); +} + +/** + * xe_svm_range_validate() - Check if the SVM range is valid + * @vm: xe_vm pointer + * @range: Pointer to the SVM range structure + * @tile_mask: Mask representing the tiles to be checked + * @devmem_preferred : if true range needs to be in devmem + * + * The xe_svm_range_validate() function checks if a range is + * valid and located in the desired memory region. + * + * Return: true if the range is valid, false otherwise + */ +bool xe_svm_range_validate(struct xe_vm *vm, + struct xe_svm_range *range, + u8 tile_mask, bool devmem_preferred) +{ + bool ret; + + xe_svm_notifier_lock(vm); + + ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask && + (devmem_preferred == range->base.flags.has_devmem_pages); + + xe_svm_notifier_unlock(vm); + + return ret; +} + +/** + * xe_svm_find_vma_start - Find start of CPU VMA + * @vm: xe_vm pointer + * @start: start address + * @end: end address + * @vma: Pointer to struct xe_vma + * + * + * This function searches for a cpu vma, within the specified + * range [start, end] in the given VM. It adjusts the range based on the + * xe_vma start and end addresses. If no cpu VMA is found, it returns ULONG_MAX. + * + * Return: The starting address of the VMA within the range, + * or ULONG_MAX if no VMA is found + */ +u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma) +{ + return drm_gpusvm_find_vma_start(&vm->svm.gpusvm, + max(start, xe_vma_start(vma)), + min(end, xe_vma_end(vma))); } #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) @@ -677,9 +682,19 @@ static struct xe_vram_region *tile_to_vr(struct xe_tile *tile) return &tile->mem.vram; } -static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, - struct xe_svm_range *range, - const struct drm_gpusvm_ctx *ctx) +/** + * xe_svm_alloc_vram()- Allocate device memory pages for range, + * migrating existing data. + * @vm: The VM. + * @tile: tile to allocate vram from + * @range: SVM range + * @ctx: DRM GPU SVM context + * + * Return: 0 on success, error code on failure. 
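xe_svm_range_validate() above reduces to a bitmask test taken under the notifier lock: every tile in the requested mask must have a present and not-invalidated binding, and the backing store must match the caller's devmem preference. A standalone sketch of that predicate, with made-up masks for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_valid(uint8_t tile_present, uint8_t tile_invalidated,
			uint8_t tile_mask, bool has_devmem_pages,
			bool devmem_preferred)
{
	return ((tile_present & ~tile_invalidated & tile_mask) == tile_mask) &&
	       (devmem_preferred == has_devmem_pages);
}

int main(void)
{
	/* caller needs tiles 0 and 1, but tile 1 was invalidated: not valid */
	printf("%d\n", range_valid(0x3, 0x2, 0x3, true, true)); /* 0 */
	/* both tiles present and clean, devmem as preferred: valid */
	printf("%d\n", range_valid(0x3, 0x0, 0x3, true, true)); /* 1 */
	return 0;
}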
+ */ +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, + struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) { struct mm_struct *mm = vm->svm.gpusvm.mm; struct xe_vram_region *vr = tile_to_vr(tile); @@ -733,13 +748,6 @@ unlock: return err; } -#else -static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, - struct xe_svm_range *range, - const struct drm_gpusvm_ctx *ctx) -{ - return -EOPNOTSUPP; -} #endif static bool supports_4K_migration(struct xe_device *xe) @@ -750,21 +758,31 @@ static bool supports_4K_migration(struct xe_device *xe) return true; } -static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, - struct xe_vma *vma) +/** + * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not + * @range: SVM range for which migration needs to be decided + * @vma: vma which has range + * @preferred_region_is_vram: preferred region for range is vram + * + * Return: True for range needing migration and migration is supported else false + */ +bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma, + bool preferred_region_is_vram) { struct xe_vm *vm = range_to_vm(&range->base); u64 range_size = xe_svm_range_size(range); - if (!range->base.flags.migrate_devmem) + if (!range->base.flags.migrate_devmem || !preferred_region_is_vram) return false; - if (xe_svm_range_in_vram(range)) { - drm_dbg(&vm->xe->drm, "Range is already in VRAM\n"); + xe_assert(vm->xe, IS_DGFX(vm->xe)); + + if (preferred_region_is_vram && xe_svm_range_in_vram(range)) { + drm_info(&vm->xe->drm, "Range is already in VRAM\n"); return false; } - if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) { + if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) { drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); return false; } @@ -798,14 +816,13 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, .devmem_only = atomic && IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), .timeslice_ms = atomic && IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0, + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? + vm->xe->atomic_svm_timeslice_ms : 0, }; struct xe_svm_range *range; - struct drm_gpusvm_range *r; - struct drm_exec exec; struct dma_fence *fence; - int migrate_try_count = ctx.devmem_only ? 3 : 1; struct xe_tile *tile = gt_to_tile(gt); + int migrate_try_count = ctx.devmem_only ? 
3 : 1; ktime_t end = 0; int err; @@ -820,23 +837,21 @@ retry: if (err) return err; - r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr, - xe_vma_start(vma), xe_vma_end(vma), - &ctx); - if (IS_ERR(r)) - return PTR_ERR(r); + range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx); + + if (IS_ERR(range)) + return PTR_ERR(range); - if (ctx.devmem_only && !r->flags.migrate_devmem) + if (ctx.devmem_only && !range->base.flags.migrate_devmem) return -EACCES; - range = to_xe_range(r); if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) return 0; range_debug(range, "PAGE FAULT"); if (--migrate_try_count >= 0 && - xe_svm_range_needs_migrate_to_vram(range, vma)) { + xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) { err = xe_svm_alloc_vram(vm, tile, range, &ctx); ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (err) { @@ -855,16 +870,11 @@ retry: } range_debug(range, "GET PAGES"); - err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx); + err = xe_svm_range_get_pages(vm, range, &ctx); /* Corner where CPU mappings have changed */ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (migrate_try_count > 0 || !ctx.devmem_only) { - if (err == -EOPNOTSUPP) { - range_debug(range, "PAGE FAULT - EVICT PAGES"); - drm_gpusvm_range_evict(&vm->svm.gpusvm, - &range->base); - } drm_dbg(&vm->xe->drm, "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); @@ -884,30 +894,21 @@ retry: range_debug(range, "PAGE FAULT - BIND"); retry_bind: - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { - err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj); - drm_exec_retry_on_contention(&exec); - if (err) { - drm_exec_fini(&exec); - goto err_out; - } - - fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); - if (IS_ERR(fence)) { - drm_exec_fini(&exec); - err = PTR_ERR(fence); - if (err == -EAGAIN) { - ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ - range_debug(range, "PAGE FAULT - RETRY BIND"); - goto retry; - } - if (xe_vm_validate_should_retry(&exec, err, &end)) - goto retry_bind; - goto err_out; + xe_vm_lock(vm, false); + fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); + if (IS_ERR(fence)) { + xe_vm_unlock(vm); + err = PTR_ERR(fence); + if (err == -EAGAIN) { + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ + range_debug(range, "PAGE FAULT - RETRY BIND"); + goto retry; } + if (xe_vm_validate_should_retry(NULL, err, &end)) + goto retry_bind; + goto err_out; } - drm_exec_fini(&exec); + xe_vm_unlock(vm); dma_fence_wait(fence, false); dma_fence_put(fence); @@ -946,6 +947,56 @@ int xe_svm_bo_evict(struct xe_bo *bo) return drm_gpusvm_evict_to_ram(&bo->devmem_allocation); } +/** + * xe_svm_range_find_or_insert- Find or insert GPU SVM range + * @vm: xe_vm pointer + * @addr: address for which range needs to be found/inserted + * @vma: Pointer to struct xe_vma which mirrors CPU + * @ctx: GPU SVM context + * + * This function finds or inserts a newly allocated a SVM range based on the + * address. + * + * Return: Pointer to the SVM range on success, ERR_PTR() on failure. 
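Whether a fault or prefetch first migrates a range to VRAM is decided by xe_svm_range_needs_migrate_to_vram() in the hunks above: the range must allow devmem migration, VRAM must be the preferred placement, the range must not already sit in VRAM, and sub-64KiB ranges are skipped on platforms without 4KiB migration support. A standalone model of that decision follows; the boolean parameters stand in for the driver's range and VMA state rather than its real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64K (64u * 1024)

static bool needs_migrate_to_vram(bool migrate_devmem, bool vram_preferred,
				  bool already_in_vram,
				  bool supports_4k_migration,
				  uint64_t range_size)
{
	if (!migrate_devmem || !vram_preferred)
		return false;
	if (already_in_vram)
		return false;	/* nothing to move */
	if (range_size < SZ_64K && !supports_4k_migration)
		return false;	/* platform cannot migrate sub-64KiB ranges */
	return true;
}

int main(void)
{
	printf("%d\n", needs_migrate_to_vram(true, true, false, false, SZ_64K)); /* 1 */
	printf("%d\n", needs_migrate_to_vram(true, true, false, false, 4096));   /* 0 */
	return 0;
}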
+ */ +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx) +{ + struct drm_gpusvm_range *r; + + r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)), + xe_vma_start(vma), xe_vma_end(vma), ctx); + if (IS_ERR(r)) + return ERR_PTR(PTR_ERR(r)); + + return to_xe_range(r); +} + +/** + * xe_svm_range_get_pages() - Get pages for a SVM range + * @vm: Pointer to the struct xe_vm + * @range: Pointer to the xe SVM range structure + * @ctx: GPU SVM context + * + * This function gets pages for a SVM range and ensures they are mapped for + * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range. + * + * Return: 0 on success, negative error code on failure. + */ +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, + struct drm_gpusvm_ctx *ctx) +{ + int err = 0; + + err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx); + if (err == -EOPNOTSUPP) { + range_debug(range, "PAGE FAULT - EVICT PAGES"); + drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); + } + + return err; +} + #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) static struct drm_pagemap_device_addr @@ -1024,6 +1075,13 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) return 0; } #else +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, + struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) +{ + return -EOPNOTSUPP; +} + int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) { return 0; diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index 30fc78b85b30..19ce4f2754a7 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -70,6 +70,27 @@ int xe_svm_bo_evict(struct xe_bo *bo); void xe_svm_range_debug(struct xe_svm_range *range, const char *operation); +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, + struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx); + +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx); + +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, + struct drm_gpusvm_ctx *ctx); + +bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma, + bool preferred_region_is_vram); + +void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range); + +bool xe_svm_range_validate(struct xe_vm *vm, + struct xe_svm_range *range, + u8 tile_mask, bool devmem_preferred); + +u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma); + /** * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping * @range: SVM range @@ -82,6 +103,53 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range) return range->base.flags.has_dma_mapping; } +/** + * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range + * @r: Pointer to the drm_gpusvm_range structure + * + * This function takes a pointer to a drm_gpusvm_range structure and + * converts it to a pointer to the containing xe_svm_range structure. + * + * Return: Pointer to the xe_svm_range structure + */ +static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r) +{ + return container_of(r, struct xe_svm_range, base); +} + +/** + * xe_svm_range_start() - SVM range start address + * @range: SVM range + * + * Return: start address of range. 
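The new xe_svm.h inline helpers added in this hunk, to_xe_range() and the range start/end/size accessors, are thin wrappers: a container_of() from the embedded drm_gpusvm_range back to the driver's range plus simple address arithmetic. The self-contained sketch below reproduces the same pattern with toy structures and a simplified container_of(); the type and field names are invented for illustration.

#include <stddef.h>
#include <stdio.h>

struct base_range {
	unsigned long start, end;
};

struct wrapped_range {
	struct base_range base;		/* embedded, like drm_gpusvm_range */
	int driver_private;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapped_range *to_wrapped(struct base_range *r)
{
	return container_of(r, struct wrapped_range, base);
}

int main(void)
{
	struct wrapped_range w = {
		.base = { 0x1000, 0x3000 },
		.driver_private = 42,
	};
	struct base_range *r = &w.base;

	printf("size=%lu priv=%d\n", r->end - r->start,
	       to_wrapped(r)->driver_private);
	return 0;
}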
+ */ +static inline unsigned long xe_svm_range_start(struct xe_svm_range *range) +{ + return drm_gpusvm_range_start(&range->base); +} + +/** + * xe_svm_range_end() - SVM range end address + * @range: SVM range + * + * Return: end address of range. + */ +static inline unsigned long xe_svm_range_end(struct xe_svm_range *range) +{ + return drm_gpusvm_range_end(&range->base); +} + +/** + * xe_svm_range_size() - SVM range size + * @range: SVM range + * + * Return: Size of range. + */ +static inline unsigned long xe_svm_range_size(struct xe_svm_range *range) +{ + return drm_gpusvm_range_size(&range->base); +} + #define xe_svm_assert_in_notifier(vm__) \ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock) @@ -97,6 +165,8 @@ void xe_svm_flush(struct xe_vm *vm); #include <linux/interval_tree.h> struct drm_pagemap_device_addr; +struct drm_gpusvm_ctx; +struct drm_gpusvm_range; struct xe_bo; struct xe_gt; struct xe_vm; @@ -167,6 +237,74 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) { } +static inline +int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, + struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) +{ + return -EOPNOTSUPP; +} + +static inline +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx) +{ + return ERR_PTR(-EINVAL); +} + +static inline +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, + struct drm_gpusvm_ctx *ctx) +{ + return -EINVAL; +} + +static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r) +{ + return NULL; +} + +static inline unsigned long xe_svm_range_start(struct xe_svm_range *range) +{ + return 0; +} + +static inline unsigned long xe_svm_range_end(struct xe_svm_range *range) +{ + return 0; +} + +static inline unsigned long xe_svm_range_size(struct xe_svm_range *range) +{ + return 0; +} + +static inline +bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma, + u32 region) +{ + return false; +} + +static inline +void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range) +{ +} + +static inline +bool xe_svm_range_validate(struct xe_vm *vm, + struct xe_svm_range *range, + u8 tile_mask, bool devmem_preferred) +{ + return false; +} + +static inline +u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma) +{ + return ULONG_MAX; +} + #define xe_svm_assert_in_notifier(...) do {} while (0) #define xe_svm_range_has_dma_mapping(...) 
false diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index 0771acbbf367..672faa0b67f1 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -87,13 +87,9 @@ */ static int xe_tile_alloc(struct xe_tile *tile) { - struct drm_device *drm = &tile_to_xe(tile)->drm; - - tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt), - GFP_KERNEL); + tile->mem.ggtt = xe_ggtt_alloc(tile); if (!tile->mem.ggtt) return -ENOMEM; - tile->mem.ggtt->tile = tile; return 0; } diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c new file mode 100644 index 000000000000..f221dbed16f0 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "regs/xe_gtt_defs.h" + +#include "xe_assert.h" +#include "xe_ggtt.h" +#include "xe_gt_sriov_vf.h" +#include "xe_sriov.h" +#include "xe_sriov_printk.h" +#include "xe_tile_sriov_vf.h" +#include "xe_wopcm.h" + +static int vf_init_ggtt_balloons(struct xe_tile *tile) +{ + struct xe_ggtt *ggtt = tile->mem.ggtt; + + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt); + if (IS_ERR(tile->sriov.vf.ggtt_balloon[0])) + return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]); + + tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt); + if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) { + xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]); + return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]); + } + + return 0; +} + +/** + * xe_tile_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range. + * @tile: the &xe_tile struct instance + * + * Return: 0 on success or a negative error code on failure. 
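The ballooning helper documented just above, and implemented in the lines that follow, reserves the two GGTT regions a VF must not use: everything between WOPCM and the VF's base, and everything between the end of the VF's slice and GUC_GGTT_TOP, after validating that the slice actually fits inside that window. A standalone sketch of the range arithmetic, using illustrative addresses rather than real hardware values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct balloon { uint64_t start, end; };	/* [start, end) reserved */

static int compute_balloons(uint64_t wopcm, uint64_t ggtt_top,
			    uint64_t vf_base, uint64_t vf_size,
			    struct balloon out[2])
{
	/* the VF slice must sit entirely inside [WOPCM, GUC_GGTT_TOP] */
	if (vf_base < wopcm || vf_base > ggtt_top ||
	    vf_size > ggtt_top - vf_base)
		return -ERANGE;

	out[0] = (struct balloon){ wopcm, vf_base };		  /* below the VF */
	out[1] = (struct balloon){ vf_base + vf_size, ggtt_top }; /* above the VF */
	return 0;
}

int main(void)
{
	struct balloon b[2];

	if (!compute_balloons(0x200000, 0xFFFF0000, 0x10000000, 0x08000000, b))
		printf("balloon0 [%#llx-%#llx) balloon1 [%#llx-%#llx)\n",
		       (unsigned long long)b[0].start, (unsigned long long)b[0].end,
		       (unsigned long long)b[1].start, (unsigned long long)b[1].end);
	return 0;
}

In the driver a balloon node is only inserted when its range is non-empty, and a failure inserting the second balloon unwinds the first.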
+ */ +int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile) +{ + u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt); + u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt); + struct xe_device *xe = tile_to_xe(tile); + u64 wopcm = xe_wopcm_size(xe); + u64 start, end; + int err; + + xe_tile_assert(tile, IS_SRIOV_VF(xe)); + xe_tile_assert(tile, ggtt_size); + lockdep_assert_held(&tile->mem.ggtt->lock); + + /* + * VF can only use part of the GGTT as allocated by the PF: + * + * WOPCM GUC_GGTT_TOP + * |<------------ Total GGTT size ------------------>| + * + * VF GGTT base -->|<- size ->| + * + * +--------------------+----------+-----------------+ + * |////////////////////| block |\\\\\\\\\\\\\\\\\| + * +--------------------+----------+-----------------+ + * + * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->| + */ + + if (ggtt_base < wopcm || ggtt_base > GUC_GGTT_TOP || + ggtt_size > GUC_GGTT_TOP - ggtt_base) { + xe_sriov_err(xe, "tile%u: Invalid GGTT configuration: %#llx-%#llx\n", + tile->id, ggtt_base, ggtt_base + ggtt_size - 1); + return -ERANGE; + } + + start = wopcm; + end = ggtt_base; + if (end != start) { + err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0], + start, end); + if (err) + return err; + } + + start = ggtt_base + ggtt_size; + end = GUC_GGTT_TOP; + if (end != start) { + err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1], + start, end); + if (err) { + xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]); + return err; + } + } + + return 0; +} + +static int vf_balloon_ggtt(struct xe_tile *tile) +{ + struct xe_ggtt *ggtt = tile->mem.ggtt; + int err; + + mutex_lock(&ggtt->lock); + err = xe_tile_sriov_vf_balloon_ggtt_locked(tile); + mutex_unlock(&ggtt->lock); + + return err; +} + +/** + * xe_tile_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes. + * @tile: the &xe_tile struct instance + */ +void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile) +{ + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]); + xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]); +} + +static void vf_deballoon_ggtt(struct xe_tile *tile) +{ + mutex_lock(&tile->mem.ggtt->lock); + xe_tile_sriov_vf_deballoon_ggtt_locked(tile); + mutex_unlock(&tile->mem.ggtt->lock); +} + +static void vf_fini_ggtt_balloons(struct xe_tile *tile) +{ + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); + + xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]); + xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]); +} + +static void cleanup_ggtt(struct drm_device *drm, void *arg) +{ + struct xe_tile *tile = arg; + + vf_deballoon_ggtt(tile); + vf_fini_ggtt_balloons(tile); +} + +/** + * xe_tile_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration. + * @tile: the &xe_tile + * + * This function is for VF use only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile) +{ + struct xe_device *xe = tile_to_xe(tile); + int err; + + err = vf_init_ggtt_balloons(tile); + if (err) + return err; + + err = vf_balloon_ggtt(tile); + if (err) { + vf_fini_ggtt_balloons(tile); + return err; + } + + return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile); +} + +/** + * DOC: GGTT nodes shifting during VF post-migration recovery + * + * The first fixup applied to the VF KMD structures as part of post-migration + * recovery is shifting nodes within &xe_ggtt instance. 
The nodes are moved + * from range previously assigned to this VF, into newly provisioned area. + * The changes include balloons, which are resized accordingly. + * + * The balloon nodes are there to eliminate unavailable ranges from use: one + * reserves the GGTT area below the range for current VF, and another one + * reserves area above. + * + * Below is a GGTT layout of example VF, with a certain address range assigned to + * said VF, and inaccessible areas above and below: + * + * 0 4GiB + * |<--------------------------- Total GGTT size ----------------------------->| + * WOPCM GUC_TOP + * |<-------------- Area mappable by xe_ggtt instance ---------------->| + * + * +---+---------------------------------+----------+----------------------+---+ + * |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\| + * +---+---------------------------------+----------+----------------------+---+ + * + * Hardware enforced access rules before migration: + * + * |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->| + * + * GGTT nodes used for tracking allocations: + * + * |<---------- balloon ------------>|<- nodes->|<----- balloon ------>| + * + * After the migration, GGTT area assigned to the VF might have shifted, either + * to lower or to higher address. But we expect the total size and extra areas to + * be identical, as migration can only happen between matching platforms. + * Below is an example of GGTT layout of the VF after migration. Content of the + * GGTT for VF has been moved to a new area, and we receive its address from GuC: + * + * +---+----------------------+----------+---------------------------------+---+ + * |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\| + * +---+----------------------+----------+---------------------------------+---+ + * + * Hardware enforced access rules after migration: + * + * |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->| + * + * So the VF has a new slice of GGTT assigned, and during migration process, the + * memory content was copied to that new area. But the &xe_ggtt nodes are still + * tracking allocations using the old addresses. The nodes within VF owned area + * have to be shifted, and balloon nodes need to be resized to properly mask out + * areas not owned by the VF. + * + * Fixed &xe_ggtt nodes used for tracking allocations: + * + * |<------ balloon ------>|<- nodes->|<----------- balloon ----------->| + * + * Due to use of GPU profiles, we do not expect the old and new GGTT ares to + * overlap; but our node shifting will fix addresses properly regardless. + */ + +/** + * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range. + * @tile: the &xe_tile struct instance + * @shift: the shift value + * + * Since Global GTT is not virtualized, each VF has an assigned range + * within the global space. This range might have changed during migration, + * which requires all memory addresses pointing to GGTT to be shifted. 
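The fixup described in that documentation, and implemented right below, runs under the GGTT lock as deballoon, shift every tracked node by the signed delta, then re-balloon around the new slice. The core of the shift is simply adding the same signed offset to every node start, as in this standalone sketch; the node array and the example delta are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* toy GGTT node starts tracked for the VF before migration */
static uint64_t node_start[] = { 0x10000000, 0x10200000, 0x10400000 };

static void shift_nodes(uint64_t *starts, unsigned int count, int64_t shift)
{
	/* the same signed shift applies to every node owned by the VF */
	for (unsigned int i = 0; i < count; i++)
		starts[i] += (uint64_t)shift;
}

int main(void)
{
	shift_nodes(node_start, 3, -0x08000000);	/* slice moved down */

	for (unsigned int i = 0; i < 3; i++)
		printf("node%u now at %#llx\n", i,
		       (unsigned long long)node_start[i]);
	return 0;
}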
+ */ +void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift) +{ + struct xe_ggtt *ggtt = tile->mem.ggtt; + + mutex_lock(&ggtt->lock); + + xe_tile_sriov_vf_deballoon_ggtt_locked(tile); + xe_ggtt_shift_nodes_locked(ggtt, shift); + xe_tile_sriov_vf_balloon_ggtt_locked(tile); + + mutex_unlock(&ggtt->lock); +} diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h new file mode 100644 index 000000000000..93eb043171e8 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_TILE_SRIOV_VF_H_ +#define _XE_TILE_SRIOV_VF_H_ + +#include <linux/types.h> + +struct xe_tile; + +int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile); +int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile); +void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile); +void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift); + +#endif diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 49ddbda7cdef..828b45b24c23 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -98,6 +98,11 @@ static const struct xe_rtp_entry_sr engine_tunings[] = { ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE)) }, + { XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED), + FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY)) + }, }; static const struct xe_rtp_entry_sr lrc_tunings[] = { diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 2741849bbf4d..6d0869518652 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -16,6 +16,7 @@ #include "xe_gsc.h" #include "xe_gt.h" #include "xe_gt_printk.h" +#include "xe_gt_sriov_vf.h" #include "xe_guc.h" #include "xe_map.h" #include "xe_mmio.h" @@ -662,11 +663,39 @@ do { \ ver_->major, ver_->minor, ver_->patch); \ } while (0) +static void uc_fw_vf_override(struct xe_uc_fw *uc_fw) +{ + struct xe_uc_fw_version *compat = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY]; + struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted; + + /* Only GuC/HuC are supported */ + if (uc_fw->type != XE_UC_FW_TYPE_GUC && uc_fw->type != XE_UC_FW_TYPE_HUC) + uc_fw->path = NULL; + + /* VF will support only firmwares that driver can autoselect */ + xe_uc_fw_change_status(uc_fw, uc_fw->path ? 
+ XE_UC_FIRMWARE_PRELOADED : + XE_UC_FIRMWARE_NOT_SUPPORTED); + + if (!xe_uc_fw_is_supported(uc_fw)) + return; + + /* PF is doing the loading, so we don't need a path on the VF */ + uc_fw->path = "Loaded by PF"; + + /* The GuC versions are set up during the VF bootstrap */ + if (uc_fw->type == XE_UC_FW_TYPE_GUC) { + uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY; + xe_gt_sriov_vf_guc_versions(uc_fw_to_gt(uc_fw), wanted, compat); + } +} + static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p) { struct xe_device *xe = uc_fw_to_xe(uc_fw); + struct xe_gt *gt = uc_fw_to_gt(uc_fw); + struct drm_printer p = xe_gt_info_printer(gt); struct device *dev = xe->drm.dev; - struct drm_printer p = drm_info_printer(dev); const struct firmware *fw = NULL; int err; @@ -675,20 +704,13 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar * before we're looked at the HW caps to see if we have uc support */ BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED); - xe_assert(xe, !uc_fw->status); - xe_assert(xe, !uc_fw->path); + xe_gt_assert(gt, !uc_fw->status); + xe_gt_assert(gt, !uc_fw->path); uc_fw_auto_select(xe, uc_fw); if (IS_SRIOV_VF(xe)) { - /* Only GuC/HuC are supported */ - if (uc_fw->type != XE_UC_FW_TYPE_GUC && - uc_fw->type != XE_UC_FW_TYPE_HUC) - uc_fw->path = NULL; - /* VF will support only firmwares that driver can autoselect */ - xe_uc_fw_change_status(uc_fw, uc_fw->path ? - XE_UC_FIRMWARE_PRELOADED : - XE_UC_FIRMWARE_NOT_SUPPORTED); + uc_fw_vf_override(uc_fw); return 0; } @@ -700,7 +722,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar if (!xe_uc_fw_is_supported(uc_fw)) { if (uc_fw->type == XE_UC_FW_TYPE_GUC) { - drm_err(&xe->drm, "No GuC firmware defined for platform\n"); + xe_gt_err(gt, "No GuC firmware defined for platform\n"); return -ENOENT; } return 0; @@ -709,7 +731,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar /* an empty path means the firmware is disabled */ if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) { xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED); - drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type)); + xe_gt_dbg(gt, "%s disabled\n", xe_uc_fw_type_repr(uc_fw->type)); return 0; } @@ -742,10 +764,10 @@ fail: XE_UC_FIRMWARE_MISSING : XE_UC_FIRMWARE_ERROR); - drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n", - xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n", - xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL); + xe_gt_notice(gt, "%s firmware %s: fetch failed with error %pe\n", + xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, ERR_PTR(err)); + xe_gt_info(gt, "%s firmware(s) can be downloaded from %s\n", + xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL); release_firmware(fw); /* OK even if fw is NULL */ diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h index ad3b35a0e6eb..914026015019 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw_types.h +++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h @@ -65,6 +65,8 @@ enum xe_uc_fw_type { * struct xe_uc_fw_version - Version for XE micro controller firmware */ struct xe_uc_fw_version { + /** @branch: branch version of the FW (not always available) */ + u16 branch; /** @major: major version of the FW */ u16 major; /** @minor: minor version of the FW */ diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 861577746929..04d1a43b81e3 100644 --- 
a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -732,7 +732,9 @@ int xe_vm_userptr_pin(struct xe_vm *vm) DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); + down_read(&vm->userptr.notifier_lock); err = xe_vm_invalidate_vma(&uvma->vma); + up_read(&vm->userptr.notifier_lock); xe_vm_unlock(vm); if (err) break; @@ -798,21 +800,47 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds) } ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO); +static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op) +{ + struct xe_vma *vma; + + vma = gpuva_to_vma(op->base.prefetch.va); + + if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma)) + xa_destroy(&op->prefetch_range.range); +} + +static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops) +{ + struct xe_vma_op *op; + + if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) + return; + + list_for_each_entry(op, &vops->list, link) + xe_vma_svm_prefetch_op_fini(op); +} + static void xe_vma_ops_fini(struct xe_vma_ops *vops) { int i; + xe_vma_svm_prefetch_ops_fini(vops); + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) kfree(vops->pt_update_ops[i].ops); } -static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask) +static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val) { int i; + if (!inc_val) + return; + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) if (BIT(i) & tile_mask) - ++vops->pt_update_ops[i].num_ops; + vops->pt_update_ops[i].num_ops += inc_val; } static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, @@ -842,7 +870,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, xe_vm_populate_rebind(op, vma, tile_mask); list_add_tail(&op->link, &vops->list); - xe_vma_ops_incr_pt_update_ops(vops, tile_mask); + xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); return 0; } @@ -977,7 +1005,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops, xe_vm_populate_range_rebind(op, vma, range, tile_mask); list_add_tail(&op->link, &vops->list); - xe_vma_ops_incr_pt_update_ops(vops, tile_mask); + xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); return 0; } @@ -1062,7 +1090,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops, xe_vm_populate_range_unbind(op, range); list_add_tail(&op->link, &vops->list); - xe_vma_ops_incr_pt_update_ops(vops, range->tile_present); + xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1); return 0; } @@ -2141,6 +2169,35 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data, return err; } +static bool vma_matches(struct xe_vma *vma, u64 page_addr) +{ + if (page_addr > xe_vma_end(vma) - 1 || + page_addr + SZ_4K - 1 < xe_vma_start(vma)) + return false; + + return true; +} + +/** + * xe_vm_find_vma_by_addr() - Find a VMA by its address + * + * @vm: the xe_vm the vma belongs to + * @page_addr: address to look up + */ +struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr) +{ + struct xe_vma *vma = NULL; + + if (vm->usm.last_fault_vma) { /* Fast lookup */ + if (vma_matches(vm->usm.last_fault_vma, page_addr)) + vma = vm->usm.last_fault_vma; + } + if (!vma) + vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K); + + return vma; +} + static const u32 region_to_mem_type[] = { XE_PL_TT, XE_PL_VRAM0, @@ -2221,13 +2278,25 @@ static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags) return true; } +static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops) +{ + struct drm_gpuva_op *__op; + + 
drm_gpuva_for_each_op(__op, ops) { + struct xe_vma_op *op = gpuva_op_to_vma_op(__op); + + xe_vma_svm_prefetch_op_fini(op); + } +} + /* * Create operations list from IOCTL arguments, setup operations fields so parse * and commit steps are decoupled from IOCTL arguments. This step can fail. */ static struct drm_gpuva_ops * -vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, - u64 bo_offset_or_userptr, u64 addr, u64 range, +vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, + struct xe_bo *bo, u64 bo_offset_or_userptr, + u64 addr, u64 range, u32 operation, u32 flags, u32 prefetch_region, u16 pat_index) { @@ -2235,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, struct drm_gpuva_ops *ops; struct drm_gpuva_op *__op; struct drm_gpuvm_bo *vm_bo; + u64 range_end = addr + range; int err; lockdep_assert_held_write(&vm->lock); @@ -2296,14 +2366,80 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, op->map.invalidate_on_bind = __xe_vm_needs_clear_scratch_pages(vm, flags); } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { - op->prefetch.region = prefetch_region; - } + struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); + struct xe_svm_range *svm_range; + struct drm_gpusvm_ctx ctx = {}; + struct xe_tile *tile; + u8 id, tile_mask = 0; + u32 i; + + if (!xe_vma_is_cpu_addr_mirror(vma)) { + op->prefetch.region = prefetch_region; + break; + } + + ctx.read_only = xe_vma_read_only(vma); + ctx.devmem_possible = IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR); + + for_each_tile(tile, vm->xe, id) + tile_mask |= 0x1 << id; + + xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC); + op->prefetch_range.region = prefetch_region; + op->prefetch_range.ranges_count = 0; +alloc_next_range: + svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx); + + if (PTR_ERR(svm_range) == -ENOENT) { + u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma); + + addr = ret == ULONG_MAX ? 
0 : ret; + if (addr) + goto alloc_next_range; + else + goto print_op_label; + } + + if (IS_ERR(svm_range)) { + err = PTR_ERR(svm_range); + goto unwind_prefetch_ops; + } + + if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) { + xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID"); + goto check_next_range; + } + + err = xa_alloc(&op->prefetch_range.range, + &i, svm_range, xa_limit_32b, + GFP_KERNEL); + if (err) + goto unwind_prefetch_ops; + + op->prefetch_range.ranges_count++; + vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH; + xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED"); +check_next_range: + if (range_end > xe_svm_range_end(svm_range) && + xe_svm_range_end(svm_range) < xe_vma_end(vma)) { + addr = xe_svm_range_end(svm_range); + goto alloc_next_range; + } + } +print_op_label: print_op(vm->xe, __op); } return ops; + +unwind_prefetch_ops: + xe_svm_prefetch_gpuva_ops_fini(ops); + drm_gpuva_ops_free(&vm->gpuvm, ops); + return ERR_PTR(err); } + ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO); static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, @@ -2498,7 +2634,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, !op->map.is_cpu_addr_mirror) || op->map.invalidate_on_bind) xe_vma_ops_incr_pt_update_ops(vops, - op->tile_mask); + op->tile_mask, 1); break; } case DRM_GPUVA_OP_REMAP: @@ -2507,6 +2643,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, gpuva_to_vma(op->base.remap.unmap->va); bool skip = xe_vma_is_cpu_addr_mirror(old); u64 start = xe_vma_start(old), end = xe_vma_end(old); + int num_remap_ops = 0; if (op->base.remap.prev) start = op->base.remap.prev->va.addr + @@ -2559,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, (ULL)op->remap.start, (ULL)op->remap.range); } else { - xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + num_remap_ops++; } } @@ -2588,11 +2725,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, (ULL)op->remap.start, (ULL)op->remap.range); } else { - xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + num_remap_ops++; } } if (!skip) - xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + num_remap_ops++; + + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops); break; } case DRM_GPUVA_OP_UNMAP: @@ -2604,7 +2743,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, return -EBUSY; if (!xe_vma_is_cpu_addr_mirror(vma)) - xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); break; case DRM_GPUVA_OP_PREFETCH: vma = gpuva_to_vma(op->base.prefetch.va); @@ -2615,8 +2754,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, return err; } - if (!xe_vma_is_cpu_addr_mirror(vma)) - xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + if (xe_vma_is_cpu_addr_mirror(vma)) + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, + op->prefetch_range.ranges_count); + else + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); + break; default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); @@ -2742,6 +2885,57 @@ static int check_ufence(struct xe_vma *vma) return 0; } +static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) +{ + bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR); + struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); + int err = 0; + + struct xe_svm_range *svm_range; + struct drm_gpusvm_ctx 
ctx = {}; + struct xe_tile *tile; + unsigned long i; + u32 region; + + if (!xe_vma_is_cpu_addr_mirror(vma)) + return 0; + + region = op->prefetch_range.region; + + ctx.read_only = xe_vma_read_only(vma); + ctx.devmem_possible = devmem_possible; + ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0; + + /* TODO: Threading the migration */ + xa_for_each(&op->prefetch_range.range, i, svm_range) { + if (!region) + xe_svm_range_migrate_to_smem(vm, svm_range); + + if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) { + tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0]; + err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx); + if (err) { + drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); + return -ENODATA; + } + xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM"); + } + + err = xe_svm_range_get_pages(vm, svm_range, &ctx); + if (err) { + drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); + if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) + err = -ENODATA; + return err; + } + xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE"); + } + + return err; +} + static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma_op *op) { @@ -2779,7 +2973,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, case DRM_GPUVA_OP_PREFETCH: { struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); - u32 region = op->prefetch.region; + u32 region; + + if (xe_vma_is_cpu_addr_mirror(vma)) + region = op->prefetch_range.region; + else + region = op->prefetch.region; xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); @@ -2798,6 +2997,25 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, return err; } +static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops) +{ + struct xe_vma_op *op; + int err; + + if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) + return 0; + + list_for_each_entry(op, &vops->list, link) { + if (op->base.op == DRM_GPUVA_OP_PREFETCH) { + err = prefetch_ranges(vm, op); + if (err) + return err; + } + } + + return 0; +} + static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, struct xe_vma_ops *vops) @@ -3239,6 +3457,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, vops->q = q; vops->syncs = syncs; vops->num_syncs = num_syncs; + vops->flags = 0; } static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, @@ -3446,7 +3665,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance; u16 pat_index = bind_ops[i].pat_index; - ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, + ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset, addr, range, op, flags, prefetch_region, pat_index); if (IS_ERR(ops[i])) { @@ -3479,6 +3698,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (err) goto unwind_ops; + err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops); + if (err) + goto unwind_ops; + fence = vm_bind_ioctl_ops_execute(vm, &vops); if (IS_ERR(fence)) err = PTR_ERR(fence); @@ -3548,7 +3771,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, xe_vma_ops_init(&vops, vm, q, NULL, 0); - ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, 
bo->size, + ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, bo->size, DRM_XE_VM_BIND_OP_MAP, 0, 0, vm->xe->pat.idx[cache_lvl]); if (IS_ERR(ops)) { @@ -3620,6 +3843,68 @@ void xe_vm_unlock(struct xe_vm *vm) } /** + * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an + * address range + * @vm: The VM + * @start: start address + * @end: end address + * @tile_mask: mask for which gt's issue tlb invalidation + * + * Issue a range based TLB invalidation for gt's in tilemask + * + * Returns 0 for success, negative error code otherwise. + */ +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start, + u64 end, u8 tile_mask) +{ + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE]; + struct xe_tile *tile; + u32 fence_id = 0; + u8 id; + int err; + + if (!tile_mask) + return 0; + + for_each_tile(tile, vm->xe, id) { + if (tile_mask & BIT(id)) { + xe_gt_tlb_invalidation_fence_init(tile->primary_gt, + &fence[fence_id], true); + + err = xe_gt_tlb_invalidation_range(tile->primary_gt, + &fence[fence_id], + start, + end, + vm->usm.asid); + if (err) + goto wait; + ++fence_id; + + if (!tile->media_gt) + continue; + + xe_gt_tlb_invalidation_fence_init(tile->media_gt, + &fence[fence_id], true); + + err = xe_gt_tlb_invalidation_range(tile->media_gt, + &fence[fence_id], + start, + end, + vm->usm.asid); + if (err) + goto wait; + ++fence_id; + } + } + +wait: + for (id = 0; id < fence_id; ++id) + xe_gt_tlb_invalidation_fence_wait(&fence[id]); + + return err; +} + +/** * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock * @vma: VMA to invalidate * @@ -3632,28 +3917,34 @@ void xe_vm_unlock(struct xe_vm *vm) int xe_vm_invalidate_vma(struct xe_vma *vma) { struct xe_device *xe = xe_vma_vm(vma)->xe; + struct xe_vm *vm = xe_vma_vm(vma); struct xe_tile *tile; - struct xe_gt_tlb_invalidation_fence - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE]; - u8 id; - u32 fence_id = 0; + u8 tile_mask = 0; int ret = 0; + u8 id; xe_assert(xe, !xe_vma_is_null(vma)); xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma)); trace_xe_vma_invalidate(vma); - vm_dbg(&xe_vma_vm(vma)->xe->drm, + vm_dbg(&vm->xe->drm, "INVALIDATE: addr=0x%016llx, range=0x%016llx", xe_vma_start(vma), xe_vma_size(vma)); - /* Check that we don't race with page-table updates */ + /* + * Check that we don't race with page-table updates, tile_invalidated + * update is safe + */ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { if (xe_vma_is_userptr(vma)) { + lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) || + (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) && + lockdep_is_held(&xe_vm_resv(vm)->lock.base))); + WARN_ON_ONCE(!mmu_interval_check_retry (&to_userptr_vma(vma)->userptr.notifier, to_userptr_vma(vma)->userptr.notifier_seq)); - WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)), + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP)); } else { @@ -3661,39 +3952,17 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) } } - for_each_tile(tile, xe, id) { - if (xe_pt_zap_ptes(tile, vma)) { - xe_device_wmb(xe); - xe_gt_tlb_invalidation_fence_init(tile->primary_gt, - &fence[fence_id], - true); - - ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, - &fence[fence_id], vma); - if (ret) - goto wait; - ++fence_id; - - if (!tile->media_gt) - continue; - - xe_gt_tlb_invalidation_fence_init(tile->media_gt, - &fence[fence_id], - true); + for_each_tile(tile, xe, id) + if (xe_pt_zap_ptes(tile, vma)) + tile_mask |= BIT(id); - 
ret = xe_gt_tlb_invalidation_vma(tile->media_gt, - &fence[fence_id], vma); - if (ret) - goto wait; - ++fence_id; - } - } + xe_device_wmb(xe); -wait: - for (id = 0; id < fence_id; ++id) - xe_gt_tlb_invalidation_fence_wait(&fence[id]); + ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma), + xe_vma_end(vma), tile_mask); - vma->tile_invalidated = vma->tile_mask; + /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */ + WRITE_ONCE(vma->tile_invalidated, vma->tile_mask); return ret; } diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 494af6bdc646..3475a118f666 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -169,6 +169,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma) !xe_vma_is_cpu_addr_mirror(vma); } +struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr); + /** * to_userptr_vma() - Return a pointer to an embedding userptr vma * @vma: Pointer to the embedded struct xe_vma @@ -226,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm, struct xe_svm_range *range); +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start, + u64 end, u8 tile_mask); + int xe_vm_invalidate_vma(struct xe_vma *vma); int xe_vm_validate_protected(struct xe_vm *vm); @@ -370,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm) return false; } +/** + * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has + * a valid GPU mapping + * @tile: The tile which the GPU mapping belongs to + * @tile_present: Tile present mask + * @tile_invalidated: Tile invalidated mask + * + * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths + * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable + * without the notifier lock in userptr or SVM cases, and not reliable without + * the BO dma-resv lock in the BO case. As such, they should only be used in + * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB + * invalidation) where it is harmless. + * + * Return: True is there are valid GPU pages, False otherwise + */ +#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \ + ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id)) + #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma); #else diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 1979e9bdbdf3..bed6088e1bb3 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -100,14 +100,21 @@ struct xe_vma { struct work_struct destroy_work; }; - /** @tile_invalidated: VMA has been invalidated */ + /** + * @tile_invalidated: Tile mask of binding are invalidated for this VMA. + * protected by BO's resv and for userptrs, vm->userptr.notifier_lock in + * write mode for writing or vm->userptr.notifier_lock in read mode and + * the vm->resv. For stable reading, BO's resv or userptr + * vm->userptr.notifier_lock in read mode is required. Can be + * opportunistically read with READ_ONCE outside of locks. + */ u8 tile_invalidated; /** @tile_mask: Tile mask of where to create binding for this VMA */ u8 tile_mask; /** - * @tile_present: GT mask of binding are present for this VMA. + * @tile_present: Tile mask of binding are present for this VMA. 
* protected by vm->lock, vm->resv and for userptrs, * vm->userptr.notifier_lock for writing. Needs either for reading, * but if reading is done under the vm->lock only, it needs to be held @@ -382,6 +389,16 @@ struct xe_vma_op_unmap_range { struct xe_svm_range *range; }; +/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */ +struct xe_vma_op_prefetch_range { + /** @range: xarray for SVM ranges data */ + struct xarray range; + /** @ranges_count: number of svm ranges to map */ + u32 ranges_count; + /** @region: memory region to prefetch to */ + u32 region; +}; + /** enum xe_vma_op_flags - flags for VMA operation */ enum xe_vma_op_flags { /** @XE_VMA_OP_COMMITTED: VMA operation committed */ @@ -424,6 +441,8 @@ struct xe_vma_op { struct xe_vma_op_map_range map_range; /** @unmap_range: VMA unmap range operation specific data */ struct xe_vma_op_unmap_range unmap_range; + /** @prefetch_range: VMA prefetch range operation specific data */ + struct xe_vma_op_prefetch_range prefetch_range; }; }; @@ -441,6 +460,9 @@ struct xe_vma_ops { u32 num_syncs; /** @pt_update_ops: page table update operations */ struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE]; + /** @flag: signify the properties within xe_vma_ops*/ +#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0) + u32 flags; #ifdef TEST_VM_OPS_ERROR /** @inject_error: inject error to test error handling */ bool inject_error; diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c index b378848d3b7b..3e573b0b7ebd 100644 --- a/drivers/gpu/drm/xe/xe_vsec.c +++ b/drivers/gpu/drm/xe/xe_vsec.c @@ -149,8 +149,8 @@ static int xe_guid_decode(u32 guid, int *index, u32 *offset) return 0; } -static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, - u32 count) +int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, + u32 count) { struct xe_device *xe = pdev_to_xe_device(pdev); void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET; diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h index 5777c53faec2..dabfb4e02d70 100644 --- a/drivers/gpu/drm/xe/xe_vsec.h +++ b/drivers/gpu/drm/xe/xe_vsec.h @@ -4,8 +4,12 @@ #ifndef _XE_VSEC_H_ #define _XE_VSEC_H_ +#include <linux/types.h> + +struct pci_dev; struct xe_device; void xe_vsec_init(struct xe_device *xe); +int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, u32 count); #endif diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 67196baa4249..4a76de391abb 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -503,10 +503,6 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL)) }, - { XE_RTP_NAME("16018737384"), - XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS)) - }, /* * These two workarounds are the same, just applying to different * engines. 
Although Wa_18032095049 (for the RCS) isn't required on @@ -533,31 +529,38 @@ static const struct xe_rtp_entry_sr engine_was[] = { /* Xe2_HPG */ { XE_RTP_NAME("16018712365"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS)) }, { XE_RTP_NAME("16018737384"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS)) }, { XE_RTP_NAME("14019988906"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) }, { XE_RTP_NAME("14019877138"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) }, { XE_RTP_NAME("14020338487"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS)) }, { XE_RTP_NAME("18032247524"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE)) }, { XE_RTP_NAME("14018471104"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL)) }, /* @@ -566,7 +569,7 @@ static const struct xe_rtp_entry_sr engine_was[] = { * apply this to all engines for simplicity. 
*/ { XE_RTP_NAME("16021639441"), - XE_RTP_RULES(GRAPHICS_VERSION(2001)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002)), XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), GHWSP_CSB_REPORT_DIS | PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS, @@ -578,11 +581,12 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS)) }, { XE_RTP_NAME("14021402888"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) }, - { XE_RTP_NAME("14021821874"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)), + { XE_RTP_NAME("14021821874, 14022954250"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT)) }, @@ -774,7 +778,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT)) }, { XE_RTP_NAME("18033852989"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST)) }, { XE_RTP_NAME("14021567978"), @@ -807,7 +811,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN)) }, { XE_RTP_NAME("14019386621"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE)) }, { XE_RTP_NAME("14020756599"), @@ -824,13 +828,17 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_AUTOSTRIP)) }, { XE_RTP_NAME("15016589081"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, { XE_RTP_NAME("22021007897"), - XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) }, + { XE_RTP_NAME("18033852989"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST)) + }, /* Xe3_LPG */ { XE_RTP_NAME("14021490052"), diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 9efc5accd43d..0ee74a5b2407 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -21,7 +21,8 @@ GRAPHICS_VERSION_RANGE(1270, 1274) MEDIA_VERSION(1300) PLATFORM(DG2) -14018094691 GRAPHICS_VERSION(2004) +14018094691 GRAPHICS_VERSION_RANGE(2001, 2002) + GRAPHICS_VERSION(2004) 14019882105 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0) 18024947630 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) @@ -30,10 +31,10 @@ GRAPHICS_VERSION(2004) 13011645652 GRAPHICS_VERSION(2004) GRAPHICS_VERSION(3001) -14022293748 GRAPHICS_VERSION(2001) +14022293748 GRAPHICS_VERSION_RANGE(2001, 2002) GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) -22019794406 GRAPHICS_VERSION(2001) +22019794406 GRAPHICS_VERSION_RANGE(2001, 2002) GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) 22019338487 MEDIA_VERSION(2000) @@ -59,3 +60,7 @@ no_media_l3 MEDIA_VERSION(3000) 
MEDIA_VERSION_RANGE(1301, 3000) 16026508708 GRAPHICS_VERSION_RANGE(1200, 3001) MEDIA_VERSION_RANGE(1300, 3000) + +# SoC workaround - currently applies to all platforms with the following +# primary GT GMDID +14022085890 GRAPHICS_VERSION(2001)
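
The prefetch changes above hang the per-op SVM state off struct xe_vma_op_prefetch_range: an xarray of xe_svm_range pointers plus a ranges_count and a target region. A minimal sketch of walking that xarray, assuming the xe driver's internal headers; the helper name and the WARN_ON cross-check are illustrative additions, not part of the patch:

#include <linux/bug.h>
#include <linux/xarray.h>

#include "xe_svm.h"
#include "xe_vm_types.h"

/* Count the SVM ranges attached to a prefetch op and cross-check the cached total. */
static u32 count_prefetch_ranges(struct xe_vma_op *op)
{
	struct xe_svm_range *svm_range;
	unsigned long index;
	u32 seen = 0;

	xa_for_each(&op->prefetch_range.range, index, svm_range)
		seen++;

	/* ranges_count is incremented for every range inserted with xa_alloc(). */
	WARN_ON(seen != op->prefetch_range.ranges_count);

	return seen;
}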
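
xe_vm_range_tilemask_tlb_invalidation() takes an explicit tile mask, issues a ranged invalidation on the primary and media GT of every selected tile, and waits on all fences before returning. A sketch of a caller, assuming the xe driver's internal headers; the wrapper name is illustrative, while the helper's signature is the one declared in xe_vm.h:

#include <linux/bits.h>

#include "xe_device.h"
#include "xe_vm.h"

/* Invalidate [start, end) on every tile of the device; callers could narrow the mask. */
static int invalidate_va_window(struct xe_vm *vm, u64 start, u64 end)
{
	struct xe_tile *tile;
	u8 tile_mask = 0;
	u8 id;

	for_each_tile(tile, vm->xe, id)
		tile_mask |= BIT(id);

	/* A zero mask is a no-op: the helper returns 0 without touching any GT. */
	return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
}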
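
xe_vm_has_valid_gpu_mapping() is deliberately advisory: its READ_ONCEs pair with the WRITE_ONCE sites in the invalidation and bind paths, so without the userptr notifier lock or the BO dma-resv the result can be stale and may only gate work that is harmless to repeat. A sketch of such an opportunistic check; the wrapper is illustrative:

#include "xe_vm.h"
#include "xe_vm_types.h"

/*
 * Opportunistic check: true means the VMA most likely still has valid GPU
 * pages on this tile, false means an invalidation has probably hit it.
 * Either answer may be stale, so only use it to skip work that is safe to
 * redo (e.g. an extra TLB invalidation or a page-fault revalidation).
 */
static bool vma_mapping_maybe_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}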
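
xe_pmt_telem_read() loses its static qualifier and gains a declaration in xe_vsec.h, so other parts of the driver can pull BMG PMT telemetry directly. A hedged sketch of a caller; the wrapper name and the assumption that a negative return value is an errno are additions here, not guarantees from the patch:

#include <linux/pci.h>
#include <linux/types.h>

#include "xe_vsec.h"

/* Pull telemetry for @guid at @offset into a caller-provided buffer. */
static int sample_pmt_telemetry(struct pci_dev *pdev, u32 guid, loff_t offset,
				u64 *buf, u32 count)
{
	int ret = xe_pmt_telem_read(pdev, guid, buf, offset, count);

	return ret < 0 ? ret : 0;	/* assumed: negative means errno */
}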
