diff options
| author | Jonathan Cavitt <jonathan.cavitt@intel.com> | 2026-03-24 15:29:40 +0000 |
|---|---|---|
| committer | Matthew Brost <matthew.brost@intel.com> | 2026-03-25 18:05:59 -0700 |
| commit | 50c577eab051638fbe8989fae1f826ecc1d2e2c7 (patch) | |
| tree | dd742513963a0f83c3a71d5b5e99fbaaf8b9f575 /drivers/gpu/drm/xe | |
| parent | 64c732ee2a00a2d6a2693ed25663fa0544c56ba8 (diff) | |
drm/xe/xe_vm: Implement xe_vm_get_property_ioctl
Add support for userspace to request a list of observed faults
from a specified VM.
v2:
- Only allow querying of failed pagefaults (Matt Brost)
v3:
- Remove unnecessary size parameter from helper function, as it
is a property of the arguments. (jcavitt)
- Remove unnecessary copy_from_user (Jianxun)
- Set address_precision to 1 (Jianxun)
- Report max size instead of dynamic size for memory allocation
purposes. Total memory usage is reported separately.
v4:
- Return int from xe_vm_get_property_size (Shuicheng)
- Fix memory leak (Shuicheng)
- Remove unnecessary size variable (jcavitt)
v5:
- Rename ioctl to xe_vm_get_faults_ioctl (jcavitt)
- Update fill_property_pfs to eliminate need for kzalloc (Jianxun)
v6:
- Repair and move fill_faults break condition (Dan Carpenter)
- Free vm after use (jcavitt)
- Combine assertions (jcavitt)
- Expand size check in xe_vm_get_faults_ioctl (jcavitt)
- Remove return mask from fill_faults, as return is already -EFAULT or 0
(jcavitt)
v7:
- Revert back to using xe_vm_get_property_ioctl
- Apply better copy_to_user logic (jcavitt)
v8:
- Fix and clean up error value handling in ioctl (jcavitt)
- Reapply return mask for fill_faults (jcavitt)
v9:
- Future-proof size logic for zero-size properties (jcavitt)
- Add access and fault types (Jianxun)
- Remove address type (Jianxun)
v10:
- Remove unnecessary switch case logic (Raag)
- Compress size get, size validation, and property fill functions into a
single helper function (jcavitt)
- Assert valid size (jcavitt)
v11:
- Remove unnecessary else condition
- Correct backwards helper function size logic (jcavitt)
v12:
- Use size_t instead of int (Raag)
v13:
- Remove engine class and instance (Ivan)
v14:
- Map access type, fault type, and fault level to user macros (Matt
Brost, Ivan)
v15:
- Remove unnecessary size assertion (jcavitt)
v16:
- Nit fixes (Matt Brost)
v17:
- Rebase and refactor (jcavitt)
v18:
- Do not copy_to_user in critical section (Matt Brost)
- Assert args->size is multiple of sizeof(struct xe_vm_fault) (Matt
Brost)
v19:
- Remove unnecessary memset (Matt Brost)
v20:
- Report canonicalized address (Jose)
- Mask out prefetch data from access type (Jose, jcavitt)
v21:
- s/uAPI/Link in the commit log links
- Align debug parameters
Link: https://github.com/intel/compute-runtime/pull/878
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Suggested-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Michal Mrozek <michal.mrozek@intel.com>
Cc: Jianxun Zhang <jianxun.zhang@intel.com>
Cc: Shuicheng Lin <shuicheng.lin@intel.com>
Cc: Raag Jadav <raag.jadav@intel.com>
Cc: Ivan Briano <ivan.briano@intel.com>
Cc: Jose Souza <jose.souza@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260324152935.72444-10-jonathan.cavitt@intel.com
Diffstat (limited to 'drivers/gpu/drm/xe')
| -rw-r--r-- | drivers/gpu/drm/xe/xe_device.c | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c | 117 | ||||
| -rw-r--r-- | drivers/gpu/drm/xe/xe_vm.h | 3 |
3 files changed, 122 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 05fa16044f9b..041e014ed92c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -211,6 +211,8 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
 			  DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
 			  DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(XE_VM_GET_PROPERTY, xe_vm_get_property_ioctl,
+			  DRM_RENDER_ALLOW),
 };
 
 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5ed98ec8674b..d96e0a0c5605 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3974,6 +3974,123 @@ put_vm:
 	return err;
 }
 
+/*
+ * Map access type, fault type, and fault level from current bspec
+ * specification to user spec abstraction. The current mapping is
+ * approximately 1-to-1, with access type being the only notable
+ * exception as it carries additional data with respect to prefetch
+ * status that needs to be masked out.
+ */
+static u8 xe_to_user_access_type(u8 access_type)
+{
+	return access_type & XE_PAGEFAULT_ACCESS_TYPE_MASK;
+}
+
+static u8 xe_to_user_fault_type(u8 fault_type)
+{
+	return fault_type;
+}
+
+static u8 xe_to_user_fault_level(u8 fault_level)
+{
+	return fault_level;
+}
+
+static int fill_faults(struct xe_vm *vm,
+		       struct drm_xe_vm_get_property *args)
+{
+	struct xe_vm_fault __user *usr_ptr = u64_to_user_ptr(args->data);
+	struct xe_vm_fault *fault_list, fault_entry = { 0 };
+	struct xe_vm_fault_entry *entry;
+	int ret = 0, i = 0, count, entry_size;
+
+	entry_size = sizeof(struct xe_vm_fault);
+	count = args->size / entry_size;
+
+	fault_list = kcalloc(count, sizeof(struct xe_vm_fault), GFP_KERNEL);
+	if (!fault_list)
+		return -ENOMEM;
+
+	spin_lock(&vm->faults.lock);
+	list_for_each_entry(entry, &vm->faults.list, list) {
+		if (i == count)
+			break;
+
+		fault_entry.address = xe_device_canonicalize_addr(vm->xe, entry->address);
+		fault_entry.address_precision = entry->address_precision;
+
+		fault_entry.access_type = xe_to_user_access_type(entry->access_type);
+		fault_entry.fault_type = xe_to_user_fault_type(entry->fault_type);
+		fault_entry.fault_level = xe_to_user_fault_level(entry->fault_level);
+
+		memcpy(&fault_list[i], &fault_entry, entry_size);
+
+		i++;
+	}
+	spin_unlock(&vm->faults.lock);
+
+	ret = copy_to_user(usr_ptr, fault_list, args->size);
+
+	kfree(fault_list);
+	return ret ? -EFAULT : 0;
+}
+
+static int xe_vm_get_property_helper(struct xe_vm *vm,
+				     struct drm_xe_vm_get_property *args)
+{
+	size_t size;
+
+	switch (args->property) {
+	case DRM_XE_VM_GET_PROPERTY_FAULTS:
+		spin_lock(&vm->faults.lock);
+		size = size_mul(sizeof(struct xe_vm_fault), vm->faults.len);
+		spin_unlock(&vm->faults.lock);
+
+		if (!args->size) {
+			args->size = size;
+			return 0;
+		}
+
+		/*
+		 * Number of faults may increase between calls to
+		 * xe_vm_get_property_ioctl, so just report the number of
+		 * faults the user requests if it's less than or equal to
+		 * the number of faults in the VM fault array.
+		 *
+		 * We should also at least assert that the args->size value
+		 * is a multiple of the xe_vm_fault struct size.
+		 */
+		if (args->size > size || args->size % sizeof(struct xe_vm_fault))
+			return -EINVAL;
+
+		return fill_faults(vm, args);
+	}
+	return -EINVAL;
+}
+
+int xe_vm_get_property_ioctl(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct xe_device *xe = to_xe_device(drm);
+	struct xe_file *xef = to_xe_file(file);
+	struct drm_xe_vm_get_property *args = data;
+	struct xe_vm *vm;
+	int ret = 0;
+
+	if (XE_IOCTL_DBG(xe, (args->reserved[0] || args->reserved[1] ||
+			      args->reserved[2])))
+		return -EINVAL;
+
+	vm = xe_vm_lookup(xef, args->vm_id);
+	if (XE_IOCTL_DBG(xe, !vm))
+		return -ENOENT;
+
+	ret = xe_vm_get_property_helper(vm, args);
+
+	xe_vm_put(vm);
+	return ret;
+}
+
 /**
  * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
  * @vm: VM to bind the BO to
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 42767d2aebac..c5b900f38ded 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -210,6 +210,9 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
 int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file);
+int xe_vm_get_property_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file);
+
 void xe_vm_close_and_put(struct xe_vm *vm);
 
 static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
