author     Thomas Hellström <thomas.hellstrom@linux.intel.com>   2025-12-19 12:33:07 +0100
committer  Thomas Hellström <thomas.hellstrom@linux.intel.com>   2025-12-23 10:00:47 +0100
commit     14b60874c90a61ffb9c32787bc5f3fcde93b9cd4 (patch)
tree       8b777a6f8ac52863b1bd8b3f76a847ccd2164000
parent     e44f47a9bf5104c9b9efcc8fc95a858c66f2d1e6 (diff)
drm/xe: Use the drm_pagemap_util helper to get a svm pagemap owner
Register a driver-wide owner list, provide a callback to identify fast
interconnects, and use the drm_pagemap_util helper to allocate or reuse
a suitable owner struct. For now, pagemaps on different tiles of the
same device are considered to have a fast interconnect and thus share
the same owner.

v2:
- Fix up the error onion unwind in xe_pagemap_create(). (Matt Brost)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251219113320.183860-12-thomas.hellstrom@linux.intel.com
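The owner-sharing pattern this patch adopts can be summarized with the
following minimal sketch. It is based only on the drm_pagemap_util calls
visible in the diff below (DRM_PAGEMAP_OWNER_LIST_DEFINE(),
drm_pagemap_acquire_owner() and drm_pagemap_release_owner()); every my_*
identifier is a hypothetical stand-in for driver code, not part of the
helper API:

/*
 * Minimal sketch of the drm_pagemap_util owner-sharing pattern.
 * All my_* names are hypothetical; only the helper calls that appear
 * in the patch below are assumed to exist.
 */
#include <drm/drm_pagemap_util.h>

/* One driver-wide owner list, shared by all peers of this driver. */
static DRM_PAGEMAP_OWNER_LIST_DEFINE(my_owner_list);

/* Driver-specific: map a peer back to its device (hypothetical). */
static struct device *my_peer_to_dev(struct drm_pagemap_peer *peer);

/*
 * Two peers share an owner iff they have a fast interconnect; here,
 * as in the xe patch, that means living on the same device.
 */
static bool my_has_interconnect(struct drm_pagemap_peer *peer1,
				struct drm_pagemap_peer *peer2)
{
	return my_peer_to_dev(peer1) == my_peer_to_dev(peer2);
}

static int my_register_peer(struct drm_pagemap_peer *peer)
{
	int err;

	/*
	 * Allocates a fresh owner struct, or reuses the owner of an
	 * already-registered peer for which my_has_interconnect()
	 * returns true.
	 */
	err = drm_pagemap_acquire_owner(peer, &my_owner_list,
					my_has_interconnect);
	if (err)
		return err;

	/*
	 * peer->owner is now valid for dev_pagemap::owner and for
	 * hmm_range_fault(), until drm_pagemap_release_owner(peer)
	 * is called.
	 */
	return 0;
}

The patch itself applies this twice: each xe_pagemap and each xe_vm
embeds a struct drm_pagemap_peer, tags it via peer.private
(XE_PEER_PAGEMAP vs. XE_PEER_VM) so the connectivity callback can find
the underlying struct device, and releases the owner again on the
corresponding teardown paths.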
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c        64
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.h        24
-rw-r--r--  drivers/gpu/drm/xe/xe_userptr.c     2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c          2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h    3
5 files changed, 71 insertions, 24 deletions
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 849ba115e6da..92c04d0e93f1 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -22,8 +22,17 @@
#include "xe_vm_types.h"
#include "xe_vram_types.h"
+/* Identifies subclasses of struct drm_pagemap_peer */
+#define XE_PEER_PAGEMAP ((void *)0ul)
+#define XE_PEER_VM ((void *)1ul)
+
static int xe_svm_get_pagemaps(struct xe_vm *vm);
+void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
+{
+ return force_smem ? NULL : vm->svm.peer.owner;
+}
+
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
/*
@@ -777,6 +786,25 @@ static void xe_svm_put_pagemaps(struct xe_vm *vm)
}
}
+static struct device *xe_peer_to_dev(struct drm_pagemap_peer *peer)
+{
+ if (peer->private == XE_PEER_PAGEMAP)
+ return container_of(peer, struct xe_pagemap, peer)->dpagemap.drm->dev;
+
+ return container_of(peer, struct xe_vm, svm.peer)->xe->drm.dev;
+}
+
+static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
+ struct drm_pagemap_peer *peer2)
+{
+ struct device *dev1 = xe_peer_to_dev(peer1);
+ struct device *dev2 = xe_peer_to_dev(peer2);
+
+ return dev1 == dev2;
+}
+
+static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);
+
/**
* xe_svm_init() - SVM initialize
* @vm: The VM.
@@ -795,10 +823,18 @@ int xe_svm_init(struct xe_vm *vm)
INIT_WORK(&vm->svm.garbage_collector.work,
xe_svm_garbage_collector_work_func);
- err = xe_svm_get_pagemaps(vm);
+ vm->svm.peer.private = XE_PEER_VM;
+ err = drm_pagemap_acquire_owner(&vm->svm.peer, &xe_owner_list,
+ xe_has_interconnect);
if (err)
return err;
+ err = xe_svm_get_pagemaps(vm);
+ if (err) {
+ drm_pagemap_release_owner(&vm->svm.peer);
+ return err;
+ }
+
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
current->mm, 0, vm->size,
xe_modparam.svm_notifier_size * SZ_1M,
@@ -808,6 +844,7 @@ int xe_svm_init(struct xe_vm *vm)
if (err) {
xe_svm_put_pagemaps(vm);
+ drm_pagemap_release_owner(&vm->svm.peer);
return err;
}
} else {
@@ -830,6 +867,7 @@ void xe_svm_close(struct xe_vm *vm)
xe_assert(vm->xe, xe_vm_is_closed(vm));
flush_work(&vm->svm.garbage_collector.work);
xe_svm_put_pagemaps(vm);
+ drm_pagemap_release_owner(&vm->svm.peer);
}
/**
@@ -977,7 +1015,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
xe_pm_runtime_get_noresume(xe);
err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
start, end, timeslice_ms,
- xe_svm_devm_owner(xe));
+ xpagemap->pagemap.owner);
if (err)
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
@@ -1092,7 +1130,6 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
.devmem_only = need_vram && devmem_possible,
.timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
- .device_private_page_owner = xe_svm_devm_owner(vm->xe),
};
struct xe_validation_ctx vctx;
struct drm_exec exec;
@@ -1116,8 +1153,8 @@ retry:
return err;
dpagemap = xe_vma_resolve_pagemap(vma, tile);
- if (!dpagemap && !ctx.devmem_only)
- ctx.device_private_page_owner = NULL;
+ ctx.device_private_page_owner =
+ xe_svm_private_page_owner(vm, !dpagemap && !ctx.devmem_only);
range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
if (IS_ERR(range))
@@ -1541,6 +1578,8 @@ static void xe_pagemap_destroy_work(struct work_struct *work)
pagemap->range.end - pagemap->range.start + 1);
drm_dev_exit(idx);
}
+
+ drm_pagemap_release_owner(&xpagemap->peer);
kfree(xpagemap);
}
@@ -1591,6 +1630,7 @@ static struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_vram
dpagemap = &xpagemap->dpagemap;
INIT_WORK(&xpagemap->destroy_work, xe_pagemap_destroy_work);
xpagemap->vr = vr;
+ xpagemap->peer.private = XE_PEER_PAGEMAP;
err = drm_pagemap_init(dpagemap, pagemap, &xe->drm, &xe_drm_pagemap_ops);
if (err)
@@ -1603,21 +1643,29 @@ static struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_vram
goto out_err;
}
+ err = drm_pagemap_acquire_owner(&xpagemap->peer, &xe_owner_list,
+ xe_has_interconnect);
+ if (err)
+ goto out_no_owner;
+
pagemap->type = MEMORY_DEVICE_PRIVATE;
pagemap->range.start = res->start;
pagemap->range.end = res->end;
pagemap->nr_range = 1;
- pagemap->owner = xe_svm_devm_owner(xe);
+ pagemap->owner = xpagemap->peer.owner;
pagemap->ops = drm_pagemap_pagemap_ops_get();
addr = devm_memremap_pages(dev, pagemap);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
- devm_release_mem_region(dev, res->start, res->end - res->start + 1);
- goto out_err;
+ goto out_no_pages;
}
xpagemap->hpa_base = res->start;
return xpagemap;
+out_no_pages:
+ drm_pagemap_release_owner(&xpagemap->peer);
+out_no_owner:
+ devm_release_mem_region(dev, res->start, res->end - res->start + 1);
out_err:
drm_pagemap_put(dpagemap);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 8a49ff17ef0c..5adce108f7eb 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,24 +6,11 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
-struct xe_device;
-
-/**
- * xe_svm_devm_owner() - Return the owner of device private memory
- * @xe: The xe device.
- *
- * Return: The owner of this device's device private memory to use in
- * hmm_range_fault()-
- */
-static inline void *xe_svm_devm_owner(struct xe_device *xe)
-{
- return xe;
-}
-
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap_util.h>
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
@@ -65,6 +52,7 @@ struct xe_svm_range {
* @pagemap: The struct dev_pagemap providing the struct pages.
* @dpagemap: The drm_pagemap managing allocation and migration.
* @destroy_work: Handles asynchronous destruction and caching.
+ * @peer: Used for pagemap owner computation.
* @hpa_base: The host physical address base for the managed memory.
* @vr: Backpointer to the xe_vram region.
*/
@@ -72,6 +60,7 @@ struct xe_pagemap {
struct dev_pagemap pagemap;
struct drm_pagemap dpagemap;
struct work_struct destroy_work;
+ struct drm_pagemap_peer peer;
resource_size_t hpa_base;
struct xe_vram_region *vr;
};
@@ -131,6 +120,8 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -368,6 +359,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
return NULL;
}
+static inline void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
+{
+ return NULL;
+}
+
static inline void xe_svm_flush(struct xe_vm *vm)
{
}
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
index 0d9130b1958a..e120323c43bc 100644
--- a/drivers/gpu/drm/xe/xe_userptr.c
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -55,7 +55,7 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
struct xe_device *xe = vm->xe;
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .device_private_page_owner = xe_svm_devm_owner(xe),
+ .device_private_page_owner = xe_svm_private_page_owner(vm, false),
.allow_mixed = true,
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 95e22ff95ea8..0b8412574777 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2908,7 +2908,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
- ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
+ ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !tile);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 34764d714eb2..cfce18762aa7 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -8,6 +8,7 @@
#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>
+#include <drm/drm_pagemap_util.h>
#include <linux/dma-resv.h>
#include <linux/kref.h>
@@ -192,6 +193,8 @@ struct xe_vm {
struct work_struct work;
} garbage_collector;
struct xe_pagemap *pagemaps[XE_MAX_TILES_PER_DEVICE];
+ /** @svm.peer: Used for pagemap connectivity computations. */
+ struct drm_pagemap_peer peer;
} svm;
struct xe_device *xe;