| author | Dave Airlie <airlied@redhat.com> | 2026-01-23 08:03:43 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2026-01-23 08:03:49 +1000 |
| commit | e63b9229c3d2bf6684b1c154e8365e2d1822f039 | |
| tree | ea798937726028d6716bf43fccb375e1524fc774 | |
| parent | 353f91bc25e228c7e089b6c2791c04f8dd2517b6 | |
| parent | e27ada4f19e7ffda4c05ce8633daf6daed667eea | |
Merge tag 'drm-xe-fixes-2026-01-22' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
UAPI Changes:
- Disallow bind-queue sharing across multiple VMs (Matt Auld)
Core Changes:
- Fix xe userptr in the absence of CONFIG_DEVICE_PRIVATE (Thomas)
Driver Changes:
- Fix a missed page count update (Matt Brost)
- Fix a confused argument to alloc_workqueue() (Marco Crivellari)
- Kernel-doc fixes (Jani)
- Disable a workaround on VFs (Matt Brost)
- Fix a job lock assert (Matt Auld)
- Update wedged.mode only after successful reset policy change (Lukasz)
- Select CONFIG_DEVICE_PRIVATE when DRM_XE_GPUSVM is selected (Thomas)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aXIdiXaY-RxoaviV@fedora
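
The alloc_workqueue() item above comes down to argument order: the workqueue API takes the WQ_* flags as its second argument and max_active as its third, so the old xe_ggtt.c call passed WQ_MEM_RECLAIM where a concurrency limit was expected. A minimal sketch of the corrected usage (hypothetical helper name, error handling left to the caller):

```c
#include <linux/workqueue.h>

/* alloc_workqueue(fmt, flags, max_active, ...): flags come second,
 * max_active third.  Passing WQ_MEM_RECLAIM as max_active (as the old
 * call did) creates a workqueue without a reclaim rescuer and with an
 * unintended concurrency limit. */
static struct workqueue_struct *ggtt_wq_create_example(void)
{
	return alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0);
}
```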
| -rw-r--r-- | drivers/gpu/drm/Kconfig | 2 |
| -rw-r--r-- | drivers/gpu/drm/Makefile | 4 |
| -rw-r--r-- | drivers/gpu/drm/xe/Kconfig | 5 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 9 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_debugfs.c | 72 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_device_types.h | 18 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_exec_queue.c | 32 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_exec_queue.h | 1 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_ggtt.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 4 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_guc_ads.c | 14 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_guc_ads.h | 5 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_late_bind_fw_types.h | 4 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_lrc.c | 3 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c | 7 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_vm.h | 2 |
| -rw-r--r-- | include/drm/drm_pagemap.h | 19 |
20 files changed, 175 insertions, 40 deletions
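
The wedged.mode rework visible in the xe_debugfs.c hunk below replaces explicit xe_pm_runtime_get()/xe_pm_runtime_put() pairs with a scope-based guard(xe_pm_runtime)(xe). The guard class itself is not part of this diff; as a rough illustration only, such a class is typically declared with DEFINE_GUARD() from <linux/cleanup.h> along these lines:

```c
#include <linux/cleanup.h>

/* Illustrative sketch only -- the real xe_pm_runtime guard class lives in
 * the xe driver, not in this patch.  A guard declared like this takes the
 * runtime PM reference on scope entry and drops it on every exit path,
 * including the early returns in wedged_mode_set_reset_policy(). */
DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
	     xe_pm_runtime_get(_T),
	     xe_pm_runtime_put(_T))
```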
```diff
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7e6bc0b3a589..ed85d0ceee3b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,7 +210,7 @@ config DRM_GPUVM
 
 config DRM_GPUSVM
 	tristate
-	depends on DRM && DEVICE_PRIVATE
+	depends on DRM
 	select HMM_MIRROR
 	select MMU_NOTIFIER
 	help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0e1c668b46d2..d26191717428 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -108,8 +108,10 @@ obj-$(CONFIG_DRM_EXEC) += drm_exec.o
 obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
 
 drm_gpusvm_helper-y := \
-	drm_gpusvm.o\
+	drm_gpusvm.o
+drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \
 	drm_pagemap.o
+
 obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
 
 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 4b288eb3f5b0..4d7dcaff2b91 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -39,7 +39,7 @@ config DRM_XE
 	select DRM_TTM
 	select DRM_TTM_HELPER
 	select DRM_EXEC
-	select DRM_GPUSVM if !UML && DEVICE_PRIVATE
+	select DRM_GPUSVM if !UML
 	select DRM_GPUVM
 	select DRM_SCHED
 	select MMU_NOTIFIER
@@ -80,8 +80,9 @@ config DRM_XE_GPUSVM
 	bool "Enable CPU to GPU address mirroring"
 	depends on DRM_XE
 	depends on !UML
-	depends on DEVICE_PRIVATE
+	depends on ZONE_DEVICE
 	default y
+	select DEVICE_PRIVATE
 	select DRM_GPUSVM
 	help
 	  Enable this option if you want support for CPU to GPU address
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bf4ee976b680..71acd45aa33b 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1055,6 +1055,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
 			       unsigned long *scanned)
 {
 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
+	struct ttm_tt *tt = bo->ttm;
 	long lret;
 
 	/* Fake move to system, without copying data. */
@@ -1079,8 +1080,10 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
 			      .writeback = false,
 			      .allow_move = false});
 
-	if (lret > 0)
+	if (lret > 0) {
 		xe_ttm_tt_account_subtract(xe, bo->ttm);
+		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+	}
 
 	return lret;
 }
@@ -1166,8 +1169,10 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
 	if (needs_rpm)
 		xe_pm_runtime_put(xe);
 
-	if (lret > 0)
+	if (lret > 0) {
 		xe_ttm_tt_account_subtract(xe, tt);
+		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+	}
 
 out_unref:
 	xe_bo_put(xe_bo);
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index e91da9589c5f..63fd8bf13c70 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -256,14 +256,64 @@ static ssize_t wedged_mode_show(struct file *f, char __user *ubuf,
 	return simple_read_from_buffer(ubuf, size, pos, buf, len);
 }
 
+static int __wedged_mode_set_reset_policy(struct xe_gt *gt, enum xe_wedged_mode mode)
+{
+	bool enable_engine_reset;
+	int ret;
+
+	enable_engine_reset = (mode != XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET);
+	ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads,
+						       enable_engine_reset);
+	if (ret)
+		xe_gt_err(gt, "Failed to update GuC ADS scheduler policy (%pe)\n", ERR_PTR(ret));
+
+	return ret;
+}
+
+static int wedged_mode_set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode)
+{
+	struct xe_gt *gt;
+	int ret;
+	u8 id;
+
+	guard(xe_pm_runtime)(xe);
+	for_each_gt(gt, xe, id) {
+		ret = __wedged_mode_set_reset_policy(gt, mode);
+		if (ret) {
+			if (id > 0) {
+				xe->wedged.inconsistent_reset = true;
+				drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n");
+			}
+			return ret;
+		}
+	}
+
+	xe->wedged.inconsistent_reset = false;
+
+	return 0;
+}
+
+static bool wedged_mode_needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode)
+{
+	if (xe->wedged.inconsistent_reset)
+		return true;
+
+	if (xe->wedged.mode == mode)
+		return false;
+
+	if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET ||
+	    mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)
+		return true;
+
+	return false;
+}
+
 static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
 			       size_t size, loff_t *pos)
 {
 	struct xe_device *xe = file_inode(f)->i_private;
-	struct xe_gt *gt;
 	u32 wedged_mode;
 	ssize_t ret;
-	u8 id;
 
 	ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode);
 	if (ret)
@@ -272,22 +322,14 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
 	if (wedged_mode > 2)
 		return -EINVAL;
 
-	if (xe->wedged.mode == wedged_mode)
-		return size;
+	if (wedged_mode_needs_policy_update(xe, wedged_mode)) {
+		ret = wedged_mode_set_reset_policy(xe, wedged_mode);
+		if (ret)
+			return ret;
+	}
 
 	xe->wedged.mode = wedged_mode;
 
-	xe_pm_runtime_get(xe);
-	for_each_gt(gt, xe, id) {
-		ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
-		if (ret) {
-			xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
-			xe_pm_runtime_put(xe);
-			return -EIO;
-		}
-	}
-	xe_pm_runtime_put(xe);
-
 	return size;
 }
 
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 0b2fa7c56d38..047e86e22133 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -44,6 +44,22 @@ struct xe_pat_ops;
 struct xe_pxp;
 struct xe_vram_region;
 
+/**
+ * enum xe_wedged_mode - possible wedged modes
+ * @XE_WEDGED_MODE_NEVER: Device will never be declared wedged.
+ * @XE_WEDGED_MODE_UPON_CRITICAL_ERROR: Device will be declared wedged only
+ * when critical error occurs like GT reset failure or firmware failure.
+ * This is the default mode.
+ * @XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET: Device will be declared wedged on
+ * any hang. In this mode, engine resets are disabled to avoid automatic
+ * recovery attempts. This mode is primarily intended for debugging hangs.
+ */
+enum xe_wedged_mode {
+	XE_WEDGED_MODE_NEVER = 0,
+	XE_WEDGED_MODE_UPON_CRITICAL_ERROR = 1,
+	XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET = 2,
+};
+
 #define XE_BO_INVALID_OFFSET	LONG_MAX
 
 #define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
@@ -587,6 +603,8 @@ struct xe_device {
 		int mode;
 		/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
 		unsigned long method;
+		/** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */
+		bool inconsistent_reset;
 	} wedged;
 
 	/** @bo_device: Struct to control async free of BOs */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8724f8de67e2..779d7e7e2d2e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -328,6 +328,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  * @xe: Xe device.
  * @tile: tile which bind exec queue belongs to.
  * @flags: exec queue creation flags
+ * @user_vm: The user VM which this exec queue belongs to
  * @extensions: exec queue creation extensions
  *
  * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
@@ -341,6 +342,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  */
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 						struct xe_tile *tile,
+						struct xe_vm *user_vm,
 						u32 flags, u64 extensions)
 {
 	struct xe_gt *gt = tile->primary_gt;
@@ -377,6 +379,9 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 			xe_exec_queue_put(q);
 			return ERR_PTR(err);
 		}
+
+		if (user_vm)
+			q->user_vm = xe_vm_get(user_vm);
 	}
 
 	return q;
@@ -407,6 +412,11 @@ void xe_exec_queue_destroy(struct kref *ref)
 		xe_exec_queue_put(eq);
 	}
 
+	if (q->user_vm) {
+		xe_vm_put(q->user_vm);
+		q->user_vm = NULL;
+	}
+
 	q->ops->destroy(q);
 }
 
@@ -742,6 +752,22 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
 			return -EINVAL;
 
+		vm = xe_vm_lookup(xef, args->vm_id);
+		if (XE_IOCTL_DBG(xe, !vm))
+			return -ENOENT;
+
+		err = down_read_interruptible(&vm->lock);
+		if (err) {
+			xe_vm_put(vm);
+			return err;
+		}
+
+		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+			up_read(&vm->lock);
+			xe_vm_put(vm);
+			return -ENOENT;
+		}
+
 		for_each_tile(tile, xe, id) {
 			struct xe_exec_queue *new;
 
@@ -749,9 +775,11 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 			if (id)
 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
 
-			new = xe_exec_queue_create_bind(xe, tile, flags,
+			new = xe_exec_queue_create_bind(xe, tile, vm, flags,
 							args->extensions);
 			if (IS_ERR(new)) {
+				up_read(&vm->lock);
+				xe_vm_put(vm);
 				err = PTR_ERR(new);
 				if (q)
 					goto put_exec_queue;
@@ -763,6 +791,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 				list_add_tail(&new->multi_gt_list,
 					      &q->multi_gt_link);
 		}
+		up_read(&vm->lock);
+		xe_vm_put(vm);
 	} else {
 		logical_mask = calc_validate_logical_mask(xe, eci,
 							  args->width,
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index fda4d4f9bda8..37a9da22f420 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -28,6 +28,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
 						 u32 flags, u64 extensions);
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 						struct xe_tile *tile,
+						struct xe_vm *user_vm,
 						u32 flags, u64 extensions);
 void xe_exec_queue_fini(struct xe_exec_queue *q);
 
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 771ffe35cd0c..3a4263c92b3d 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -54,6 +54,12 @@ struct xe_exec_queue {
 	struct kref refcount;
 	/** @vm: VM (address space) for this exec queue */
 	struct xe_vm *vm;
+	/**
+	 * @user_vm: User VM (address space) for this exec queue (bind queues
+	 * only)
+	 */
+	struct xe_vm *user_vm;
+
 	/** @class: class of this exec queue */
 	enum xe_engine_class class;
 	/**
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index ef481b334af4..793d7324a395 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -322,7 +322,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
 	else
 		ggtt->pt_ops = &xelp_pt_ops;
 
-	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+	ggtt->wq = alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0);
 	if (!ggtt->wq)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 420b0e6089de..e8897a77ba19 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -41,10 +41,10 @@ struct xe_gt_sriov_vf_runtime {
 };
 
 /**
- * xe_gt_sriov_vf_migration - VF migration data.
+ * struct xe_gt_sriov_vf_migration - VF migration data.
  */
 struct xe_gt_sriov_vf_migration {
-	/** @migration: VF migration recovery worker */
+	/** @worker: VF migration recovery worker */
 	struct work_struct worker;
 	/** @lock: Protects recovery_queued, teardown */
 	spinlock_t lock;
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index bcb85a1bf26d..3f7f1b5602d5 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -983,16 +983,17 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
 /**
  * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
  * @ads: Additional data structures object
+ * @enable_engine_reset: true to enable engine resets, false otherwise
  *
- * This function update the GuC's engine reset policy based on wedged.mode.
+ * This function update the GuC's engine reset policy.
  *
  * Return: 0 on success, and negative error code otherwise.
  */
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+					     bool enable_engine_reset)
 {
 	struct guc_policies *policies;
 	struct xe_guc *guc = ads_to_guc(ads);
-	struct xe_device *xe = ads_to_xe(ads);
 	CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
 
 	if (!xe_guc_buf_is_valid(buf))
@@ -1004,10 +1005,11 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
 	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
 	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
 	policies->is_valid = 1;
-	if (xe->wedged.mode == 2)
-		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
-	else
+
+	if (enable_engine_reset)
 		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+	else
+		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
 
 	return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
 }
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.h b/drivers/gpu/drm/xe/xe_guc_ads.h
index 2e6674c760ff..7a39f361cb17 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads.h
@@ -6,6 +6,8 @@
 #ifndef _XE_GUC_ADS_H_
 #define _XE_GUC_ADS_H_
 
+#include <linux/types.h>
+
 struct xe_guc_ads;
 
 int xe_guc_ads_init(struct xe_guc_ads *ads);
@@ -13,6 +15,7 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads);
 void xe_guc_ads_populate(struct xe_guc_ads *ads);
 void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads);
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+					     bool enable_engine_reset);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
index 0f5da89ce98b..2a8a985c37e7 100644
--- a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
@@ -15,10 +15,12 @@
 #define XE_LB_MAX_PAYLOAD_SIZE	SZ_4K
 
 /**
- * xe_late_bind_fw_id - enum to determine late binding fw index
+ * enum xe_late_bind_fw_id - enum to determine late binding fw index
  */
 enum xe_late_bind_fw_id {
+	/** @XE_LB_FW_FAN_CONTROL: Fan control */
 	XE_LB_FW_FAN_CONTROL = 0,
+	/** @XE_LB_FW_MAX_ID: Number of IDs */
 	XE_LB_FW_MAX_ID
 };
 
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index b5083c99dd50..281286f2b5f9 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1050,6 +1050,9 @@ static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
 {
 	u32 *cmd = batch;
 
+	if (IS_SRIOV_VF(gt_to_xe(lrc->gt)))
+		return 0;
+
 	if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
 		return -ENOSPC;
 
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 5a95b08a4723..d8ee76aab4e4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2445,7 +2445,7 @@ void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
 	if (is_migrate)
 		mutex_lock(&m->job_mutex);
 	else
-		xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+		xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
 }
 
 /**
@@ -2463,7 +2463,7 @@ void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
 	if (is_migrate)
 		mutex_unlock(&m->job_mutex);
 	else
-		xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+		xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
 }
 
 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 797a4b866226..d963231b5135 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -346,7 +346,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
 		flags = EXEC_QUEUE_FLAG_KERNEL |
 			EXEC_QUEUE_FLAG_PERMANENT |
 			EXEC_QUEUE_FLAG_MIGRATE;
-		q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+		q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			goto err_ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 79ab6c512d3e..095bb197e8b0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1617,7 +1617,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 		if (!vm->pt_root[id])
 			continue;
 
-		q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
+		q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			goto err_close;
@@ -3578,6 +3578,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		}
 	}
 
+	if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
+		err = -EINVAL;
+		goto put_exec_queue;
+	}
+
 	/* Ensure all UNMAPs visible */
 	xe_svm_flush(vm);
 
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index ef8a5019574e..016f6786134c 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -379,7 +379,7 @@ static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *
 }
 
 /**
- * xe_vm_set_validation_exec() - Accessor to read the drm_exec object
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
  * @vm: The vm we want to register a drm_exec object with.
  *
  * Return: The drm_exec object used to lock the vm's resv. The value
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 70a7991f784f..eb29e5309f0a 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -209,6 +209,19 @@ struct drm_pagemap_devmem_ops {
 				 struct dma_fence *pre_migrate_fence);
 };
 
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+#else
+
+static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+	return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
 /**
  * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
  *
@@ -233,6 +246,8 @@ struct drm_pagemap_devmem {
 	struct dma_fence *pre_migrate_fence;
 };
 
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 				  struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
@@ -243,8 +258,6 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
 
 const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
 
-struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
-
 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
 			     struct device *dev, struct mm_struct *mm,
 			     const struct drm_pagemap_devmem_ops *ops,
@@ -256,4 +269,6 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
 			    struct mm_struct *mm,
 			    unsigned long timeslice_ms);
 
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
 #endif
```
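
With the drm_pagemap.h change above, the declarations that require device-private pages are wrapped in IS_ENABLED(CONFIG_ZONE_DEVICE), and drm_pagemap_page_to_dpagemap() keeps a static inline stub returning NULL, so callers do not need their own #ifdefs. A minimal caller sketch (hypothetical function, not from this series):

```c
#include <drm/drm_pagemap.h>

/* With CONFIG_ZONE_DEVICE=n the header's static inline stub returns NULL,
 * so this compiles unchanged and simply reports that the page has no
 * associated drm_pagemap. */
static bool page_has_dpagemap_example(struct page *page)
{
	return drm_pagemap_page_to_dpagemap(page) != NULL;
}
```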
