author     Dave Airlie <airlied@redhat.com>    2026-01-01 17:00:22 +1000
committer  Dave Airlie <airlied@redhat.com>    2026-01-01 17:00:59 +1000
commit     59260fe5821ad108d0fda8a4a4fe0448e9821f27 (patch)
tree       b6581b5aa5cae8b4fc1f21761211a87eaa5a6dc6 /include
parent     9ec3c8ee16a07dff8be82aba595dd77c134c03c2 (diff)
parent     0b075f82935e82fc9fff90d06d2a161caaebd9c3 (diff)
Merge tag 'drm-xe-next-2025-12-30' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next
Core Changes:
- Dynamic pagemaps and multi-device SVM (Thomas)

Driver Changes:
- Introduce SRIOV scheduler Groups (Daniele)
- Configure migration queue as low latency (Francois)
- Don't use absolute path in generated header comment (Calvin Owens)
- Add SoC remapper support for system controller (Umesh)
- Insert compiler barriers in GuC code (Jonathan)
- Rebar updates (Lucas)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aVOiULyYdnFbq-JB@fedora
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_gpusvm.h         29
-rw-r--r--  include/drm/drm_pagemap.h       128
-rw-r--r--  include/drm/drm_pagemap_util.h   92
-rw-r--r--  include/uapi/drm/xe_drm.h        18
4 files changed, 257 insertions, 10 deletions
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 632e100e6efb..2578ac92a8d4 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -328,6 +328,35 @@ void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages);
+/**
+ * enum drm_gpusvm_scan_result - Scan result from the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_UNPOPULATED: At least one page was not present or was inaccessible.
+ * @DRM_GPUSVM_SCAN_EQUAL: All pages belong to the struct dev_pagemap indicated as
+ * the @pagemap argument to the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_OTHER: All pages belong to exactly one dev_pagemap, which is
+ * *NOT* the @pagemap argument to drm_gpusvm_scan_mm(). All pages belong to
+ * the same device private owner.
+ * @DRM_GPUSVM_SCAN_SYSTEM: All pages are present and system pages.
+ * @DRM_GPUSVM_SCAN_MIXED_DEVICE: All pages are device pages and belong to at least
+ * two different struct dev_pagemaps. All pages belong to the same device private
+ * owner.
+ * @DRM_GPUSVM_SCAN_MIXED: Pages are present and are a mix of system pages
+ * and device-private pages. All device-private pages belong to the same device
+ * private owner.
+ */
+enum drm_gpusvm_scan_result {
+ DRM_GPUSVM_SCAN_UNPOPULATED,
+ DRM_GPUSVM_SCAN_EQUAL,
+ DRM_GPUSVM_SCAN_OTHER,
+ DRM_GPUSVM_SCAN_SYSTEM,
+ DRM_GPUSVM_SCAN_MIXED_DEVICE,
+ DRM_GPUSVM_SCAN_MIXED,
+};
+
+enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
+ void *dev_private_owner,
+ const struct dev_pagemap *pagemap);
+
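
As a hedged illustration of the new interface, a driver-side helper might map
the scan result onto a migration decision as below; driver_needs_migration()
is a hypothetical name, not part of this patch:

        static bool driver_needs_migration(struct drm_gpusvm_range *range,
                                           void *dev_private_owner,
                                           const struct dev_pagemap *pagemap)
        {
                switch (drm_gpusvm_scan_mm(range, dev_private_owner, pagemap)) {
                case DRM_GPUSVM_SCAN_EQUAL:
                        /* All pages already in @pagemap; nothing to do. */
                        return false;
                case DRM_GPUSVM_SCAN_UNPOPULATED:
                case DRM_GPUSVM_SCAN_SYSTEM:
                case DRM_GPUSVM_SCAN_OTHER:
                case DRM_GPUSVM_SCAN_MIXED_DEVICE:
                case DRM_GPUSVM_SCAN_MIXED:
                        /* Populate or migrate towards @pagemap. */
                        return true;
                }
                return true;
        }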
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index f6e7e234c089..46e9c58f09e0 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -8,7 +8,10 @@
#define NR_PAGES(order) (1U << (order))
+struct dma_fence;
struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_dev_hold;
struct drm_pagemap_zdd;
struct device;
@@ -123,17 +126,49 @@ struct drm_pagemap_ops {
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+ /**
+ * @destroy: Destroy the drm_pagemap and associated resources.
+ * @dpagemap: The drm_pagemap to destroy.
+	 * @is_atomic_or_reclaim: Whether the function is called from
+	 * atomic or reclaim context.
+ *
+ * The implementation should take care not to attempt to
+ * destroy resources that may already have been destroyed
+ * using devm_ callbacks, since this function may be called
+ * after the underlying struct device has been unbound.
+ * If the implementation defers the execution to a work item
+ * to avoid locking issues, then it must make sure the work
+ * items are flushed before module exit. If the destroy call
+ * happens after the provider's pci_remove() callback has
+	 * been executed, a module reference and a drm device reference are
+	 * held across the destroy callback.
+ */
+ void (*destroy)(struct drm_pagemap *dpagemap,
+ bool is_atomic_or_reclaim);
};
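
A minimal sketch of a @destroy implementation honoring the constraints above,
deferring teardown to a work item when called from atomic or reclaim context.
The xe_pagemap wrapper is an assumption for illustration; INIT_WORK() is
presumed done at creation time, and the work must be flushed before module
exit as noted in the kernel-doc:

        #include <linux/slab.h>
        #include <linux/workqueue.h>
        #include <drm/drm_pagemap.h>

        struct xe_pagemap {                     /* hypothetical wrapper */
                struct drm_pagemap dpagemap;
                struct work_struct destroy_work;
        };

        static void xe_pagemap_destroy_work(struct work_struct *work)
        {
                struct xe_pagemap *xpm =
                        container_of(work, struct xe_pagemap, destroy_work);

                /* Skip resources already released via devm_ callbacks. */
                kfree(xpm);
        }

        static void xe_pagemap_destroy(struct drm_pagemap *dpagemap,
                                       bool is_atomic_or_reclaim)
        {
                struct xe_pagemap *xpm =
                        container_of(dpagemap, struct xe_pagemap, dpagemap);

                if (is_atomic_or_reclaim)
                        schedule_work(&xpm->destroy_work);
                else
                        xe_pagemap_destroy_work(&xpm->destroy_work);
        }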
/**
* struct drm_pagemap: Additional information for a struct dev_pagemap
* used for device p2p handshaking.
* @ops: The struct drm_pagemap_ops.
- * @dev: The struct drevice owning the device-private memory.
+ * @ref: Reference count.
+ * @drm: The struct drm device owning the device-private memory.
+ * @pagemap: Pointer to the underlying dev_pagemap.
+ * @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
+ * device referencing.
+ * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
+ * &struct drm_pagemap. May be NULL if no cache is used.
+ * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
+ * used if also using a pagemap cache.
*/
struct drm_pagemap {
const struct drm_pagemap_ops *ops;
- struct device *dev;
+ struct kref ref;
+ struct drm_device *drm;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap_dev_hold *dev_hold;
+ struct drm_pagemap_cache *cache;
+ struct list_head shrink_link;
};
struct drm_pagemap_devmem;
@@ -174,6 +209,8 @@ struct drm_pagemap_devmem_ops {
* @pages: Pointer to array of device memory pages (destination)
* @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
+	 * @pre_migrate_fence: dma-fence to wait for before migration starts.
+ * May be NULL.
*
* Copy pages to device memory. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -183,13 +220,16 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_devmem)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
* @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
+	 * @pre_migrate_fence: dma-fence to wait for before migration starts.
+ * May be NULL.
*
* Copy pages to system RAM. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -199,9 +239,60 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_ram)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
};
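
A sketch of how a blocking copy implementation might honor the new
@pre_migrate_fence argument; driver_blit_pages() stands in for the real copy
engine call and is purely illustrative:

        #include <linux/dma-fence.h>

        static int xe_copy_to_devmem(struct page **pages,
                                     struct drm_pagemap_addr *pagemap_addr,
                                     unsigned long npages,
                                     struct dma_fence *pre_migrate_fence)
        {
                /* The fence may be NULL; otherwise wait (or pipeline) first. */
                if (pre_migrate_fence) {
                        long ret = dma_fence_wait(pre_migrate_fence, true);

                        if (ret < 0)
                                return ret;
                }

                /* Placeholder for the driver's actual copy. */
                return driver_blit_pages(pages, pagemap_addr, npages);
        }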
+int drm_pagemap_init(struct drm_pagemap *dpagemap,
+ struct dev_pagemap *pagemap,
+ struct drm_device *drm,
+ const struct drm_pagemap_ops *ops);
+
+struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops);
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+
+void drm_pagemap_put(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_GPUSVM) */
+
+/**
+ * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
+ * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
+ *
+ * Return: Pointer to the struct drm_pagemap, or NULL.
+ */
+static inline struct drm_pagemap *
+drm_pagemap_get(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_get(&dpagemap->ref);
+
+ return dpagemap;
+}
+
+/**
+ * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
+ * unless the current reference count is zero.
+ * @dpagemap: Pointer to the drm_pagemap or NULL.
+ *
+ * Return: A pointer to @dpagemap if the reference count was successfully
+ * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
+ */
+static inline struct drm_pagemap * __must_check
+drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
+{
+ return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
+}
+
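
A typical lookup pattern for drm_pagemap_get_unless_zero(), racing against
the final reference drop; the containing slot structure is a hypothetical
example:

        #include <linux/spinlock.h>

        struct xe_dpagemap_slot {               /* hypothetical container */
                spinlock_t lock;
                struct drm_pagemap *dpagemap;
        };

        static struct drm_pagemap *
        xe_dpagemap_lookup(struct xe_dpagemap_slot *slot)
        {
                struct drm_pagemap *dpagemap;

                spin_lock(&slot->lock);
                /* Fails gracefully if the final put already ran. */
                dpagemap = drm_pagemap_get_unless_zero(slot->dpagemap);
                spin_unlock(&slot->lock);

                return dpagemap;
        }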
/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
@@ -212,6 +303,8 @@ struct drm_pagemap_devmem_ops {
* @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
* @size: Size of device memory allocation
* @timeslice_expiration: Timeslice expiration in jiffies
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
struct drm_pagemap_devmem {
struct device *dev;
@@ -221,13 +314,30 @@ struct drm_pagemap_devmem {
struct drm_pagemap *dpagemap;
size_t size;
u64 timeslice_expiration;
+ struct dma_fence *pre_migrate_fence;
+};
+
+/**
+ * struct drm_pagemap_migrate_details - Details to govern migration.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @can_migrate_same_pagemap: Whether the copy function, as indicated by
+ * the @source_peer_migrates flag, can migrate device pages within a
+ * single drm_pagemap.
+ * @source_peer_migrates: Whether, on p2p migration, the source drm_pagemap
+ * should use the copy_to_ram() callback rather than the destination
+ * drm_pagemap using the copy_to_devmem() callback.
+ */
+struct drm_pagemap_migrate_details {
+ unsigned long timeslice_ms;
+ u32 can_migrate_same_pagemap : 1;
+ u32 source_peer_migrates : 1;
};
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
- unsigned long timeslice_ms,
- void *pgmap_owner);
+ const struct drm_pagemap_migrate_details *mdetails);
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
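
With the new signature, callers bundle the former timeslice_ms argument and
the new flags into the details struct; a hedged usage sketch with
illustrative values:

        static int xe_migrate_range_to_devmem(struct drm_pagemap_devmem *devmem,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
        {
                const struct drm_pagemap_migrate_details mdetails = {
                        .timeslice_ms = 5,              /* illustrative value */
                        .can_migrate_same_pagemap = 0,
                        .source_peer_migrates = 1,      /* source-driven p2p copy */
                };

                return drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
                                                     &mdetails);
        }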
@@ -238,11 +348,15 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size);
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence);
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
+
+int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
#endif
diff --git a/include/drm/drm_pagemap_util.h b/include/drm/drm_pagemap_util.h
new file mode 100644
index 000000000000..19169b42b891
--- /dev/null
+++ b/include/drm/drm_pagemap_util.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _DRM_PAGEMAP_UTIL_H_
+#define _DRM_PAGEMAP_UTIL_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct drm_device;
+struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_owner;
+struct drm_pagemap_shrinker;
+
+/**
+ * struct drm_pagemap_peer - Structure representing a fast interconnect peer
+ * @list: Pointer to a &struct drm_pagemap_owner_list used to keep track of peers
+ * @link: List link for @list's list of peers.
+ * @owner: Pointer to a &struct drm_pagemap_owner, common for a set of peers having
+ * fast interconnects.
+ * @private: Pointer private to the struct embedding this struct.
+ */
+struct drm_pagemap_peer {
+ struct drm_pagemap_owner_list *list;
+ struct list_head link;
+ struct drm_pagemap_owner *owner;
+ void *private;
+};
+
+/**
+ * struct drm_pagemap_owner_list - Keeping track of peers and owners
+ *
+ * The owner list defines the scope where we identify peers having fast interconnects
+ * and a common owner. Typically a driver has a single global owner list to
+ * keep track of common owners for the driver's pagemaps.
+ */
+struct drm_pagemap_owner_list {
+ /** @lock: Mutex protecting the @peers list. */
+ struct mutex lock;
+ /** @peers: List of peers. */
+ struct list_head peers;
+};
+
+/*
+ * Convenience macro to define an owner list.
+ * Typically the owner list is statically declared
+ * driver-wide.
+ */
+#define DRM_PAGEMAP_OWNER_LIST_DEFINE(_name) \
+ struct drm_pagemap_owner_list _name = { \
+ .lock = __MUTEX_INITIALIZER((_name).lock), \
+ .peers = LIST_HEAD_INIT((_name).peers) }
+
+void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap);
+
+int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache);
+
+struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm);
+
+struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker);
+
+struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap);
+
+struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache);
+
+#ifdef CONFIG_PROVE_LOCKING
+
+void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+void drm_pagemap_release_owner(struct drm_pagemap_peer *peer);
+
+int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
+ struct drm_pagemap_owner_list *owner_list,
+ bool (*has_interconnect)(struct drm_pagemap_peer *peer1,
+ struct drm_pagemap_peer *peer2));
+#endif
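
Putting the owner-list pieces together, a driver could declare one global
list and register peers as sketched below; xe_has_interconnect() is an
assumed topology check, not part of this patch:

        static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_pagemap_owner_list);

        static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
                                        struct drm_pagemap_peer *peer2)
        {
                /* Placeholder: a real driver would query fabric topology. */
                return false;
        }

        static int xe_pagemap_register_peer(struct drm_pagemap_peer *peer)
        {
                /* Peers reporting an interconnect end up sharing an owner. */
                return drm_pagemap_acquire_owner(peer, &xe_pagemap_owner_list,
                                                 xe_has_interconnect);
        }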
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 726e481574fe..bb69f9b30c7d 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -2123,7 +2123,13 @@ struct drm_xe_madvise {
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
- /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ /**
+ * @preferred_mem_loc.devmem_fd:
+			 * Device file descriptor of the device where the
+ * preferred memory is located, or one of the
+ * above special values. Please also see
+ * @preferred_mem_loc.region_instance below.
+ */
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
@@ -2131,8 +2137,14 @@ struct drm_xe_madvise {
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
- /** @preferred_mem_loc.pad : MBZ */
- __u16 pad;
+ /**
+			 * @preferred_mem_loc.region_instance: Region instance.
+			 * MBZ if @devmem_fd <= &DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
+			 * Otherwise it should select the desired VRAM
+			 * instance of the device identified by
+			 * @preferred_mem_loc.devmem_fd.
+ */
+ __u16 region_instance;
/** @preferred_mem_loc.reserved : Reserved */
__u64 reserved;
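
From userspace, selecting a specific VRAM instance on a device might look
like the sketch below; fd values and the elided range setup are illustrative
assumptions, not prescribed by this header:

        struct drm_xe_madvise madvise = {};

        /* Range and madvise-type setup elided for brevity. */
        madvise.preferred_mem_loc.devmem_fd = peer_device_fd;  /* a real fd, > 0 */
        madvise.preferred_mem_loc.region_instance = 1;   /* VRAM instance on that device */
        madvise.preferred_mem_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;

        /*
         * With DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE or
         * DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM in devmem_fd,
         * region_instance must be zero (MBZ).
         */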