author     Dave Airlie <airlied@redhat.com>    2025-12-26 19:00:34 +1000
committer  Dave Airlie <airlied@redhat.com>    2025-12-26 19:00:41 +1000
commit     7bc0f871f992f1469229ffcd2b40a45ec5f695b0 (patch)
tree       01a60f01ce5d0c5a574cc3ed60e3f8defcc41c66 /include
parent     6c8e404891e1059564d1a15a71d3d76070304dde (diff)
parent     332070795bd96193756cb4446eddc3ec9ff6a0e8 (diff)
Merge tag 'drm-misc-next-2025-12-19' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for 6.20:

Core Changes:
- dma-buf: Add tracepoints
- sched: Introduce new helpers

Driver Changes:
- amdxdna: Enable hardware context priority, Remove (obsolete and never public) NPU2 Support, Race condition fix
- rockchip: Add RK3368 HDMI Support
- rz-du: Add RZ/V2H(P) MIPI-DSI Support
- panels:
  - st7571: Introduce SPI support
  - New panels: Sitronix ST7920, Samsung LTL106HL02, LG LH546WF1-ED01, HannStar HSD156JUW2

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patch.msgid.link/20251219-arcane-quaint-skunk-e383b0@houat
Diffstat (limited to 'include')
-rw-r--r--   include/drm/drm_file.h               7
-rw-r--r--   include/drm/drm_gem.h                4
-rw-r--r--   include/drm/gpu_scheduler.h         52
-rw-r--r--   include/trace/events/dma_buf.h     157
-rw-r--r--   include/uapi/drm/amdxdna_accel.h     8
-rw-r--r--   include/uapi/drm/panthor_drm.h       2
6 files changed, 223 insertions, 7 deletions
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 1a3018e4a537..6ee70ad65e1f 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/idr.h>
+#include <linux/xarray.h>
#include <uapi/drm/drm.h>
@@ -316,10 +317,8 @@ struct drm_file {
/** @table_lock: Protects @object_idr. */
spinlock_t table_lock;
- /** @syncobj_idr: Mapping of sync object handles to object pointers. */
- struct idr syncobj_idr;
- /** @syncobj_table_lock: Protects @syncobj_idr. */
- spinlock_t syncobj_table_lock;
+ /** @syncobj_xa: Mapping of sync object handles to object pointers. */
+ struct xarray syncobj_xa;
/** @filp: Pointer to the core file structure. */
struct file *filp;
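
Note on the hunk above: the per-file syncobj handle table moves from an IDR plus spinlock to an XArray. A minimal sketch of what a handle lookup could look like on top of the new field follows; the helper name is hypothetical and the real lookup lives in drivers/gpu/drm/drm_syncobj.c, so treat this as an assumption rather than the actual implementation.

/* Hypothetical lookup sketch against the new syncobj_xa field; only
 * struct drm_file and drm_syncobj_get() are taken from existing headers.
 */
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

static struct drm_syncobj *example_syncobj_find(struct drm_file *file_private,
						u32 handle)
{
	struct drm_syncobj *syncobj;

	/* xa_lock() stands in for the removed syncobj_table_lock spinlock. */
	xa_lock(&file_private->syncobj_xa);
	syncobj = xa_load(&file_private->syncobj_xa, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);	/* take a reference under the lock */
	xa_unlock(&file_private->syncobj_xa);

	return syncobj;
}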
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index f4da8ed0d630..86f5846154f7 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -508,11 +508,11 @@ static inline int drm_gem_huge_mnt_create(struct drm_device *dev,
/**
* drm_gem_get_huge_mnt - Get the huge tmpfs mountpoint used by a DRM device
* @dev: DRM device
-
+ *
* This function gets the huge tmpfs mountpoint used by DRM device @dev. A huge
* tmpfs mountpoint is used instead of `shm_mnt` after a successful call to
* drm_gem_huge_mnt_create() when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
-
+ *
* Returns:
* The huge tmpfs mountpoint in use, NULL otherwise.
*/
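
The hunk above only repairs the comment markers in the drm_gem_get_huge_mnt() kerneldoc. For context, a hedged one-liner showing how a caller might use the documented NULL-return contract; the wrapper name is made up for illustration.

/* Illustrative only: per the kerneldoc above, drm_gem_get_huge_mnt()
 * returns NULL unless drm_gem_huge_mnt_create() succeeded earlier with
 * CONFIG_TRANSPARENT_HUGEPAGE enabled.
 */
#include <drm/drm_gem.h>

static bool example_gem_uses_huge_tmpfs(struct drm_device *dev)
{
	return drm_gem_get_huge_mnt(dev) != NULL;
}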
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fb88301b3c45..78e07c2507c7 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -645,6 +645,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
+bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
@@ -674,6 +675,7 @@ bool drm_sched_job_has_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_job_is_signaled(struct drm_sched_job *job);
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
@@ -698,4 +700,54 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+/**
+ * struct drm_sched_pending_job_iter - DRM scheduler pending job iterator state
+ * @sched: DRM scheduler associated with pending job iterator
+ */
+struct drm_sched_pending_job_iter {
+ struct drm_gpu_scheduler *sched;
+};
+
+/* Drivers should never call this directly */
+static inline struct drm_sched_pending_job_iter
+__drm_sched_pending_job_iter_begin(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_pending_job_iter iter = {
+ .sched = sched,
+ };
+
+ WARN_ON(!drm_sched_is_stopped(sched));
+ return iter;
+}
+
+/* Drivers should never call this directly */
+static inline void
+__drm_sched_pending_job_iter_end(const struct drm_sched_pending_job_iter iter)
+{
+ WARN_ON(!drm_sched_is_stopped(iter.sched));
+}
+
+DEFINE_CLASS(drm_sched_pending_job_iter, struct drm_sched_pending_job_iter,
+ __drm_sched_pending_job_iter_end(_T),
+ __drm_sched_pending_job_iter_begin(__sched),
+ struct drm_gpu_scheduler *__sched);
+static inline void *
+class_drm_sched_pending_job_iter_lock_ptr(class_drm_sched_pending_job_iter_t *_T)
+{ return _T; }
+#define class_drm_sched_pending_job_iter_is_conditional false
+
+/**
+ * drm_sched_for_each_pending_job() - Iterator for each pending job in scheduler
+ * @__job: Current pending job being iterated over
+ * @__sched: DRM scheduler to iterate over pending jobs
+ * @__entity: DRM scheduler entity to filter jobs, NULL indicates no filter
+ *
+ * Iterator for each pending job in the scheduler, filtering on an entity and
+ * enforcing that the scheduler is fully stopped.
+ */
+#define drm_sched_for_each_pending_job(__job, __sched, __entity) \
+ scoped_guard(drm_sched_pending_job_iter, (__sched)) \
+ list_for_each_entry((__job), &(__sched)->pending_list, list) \
+ for_each_if(!(__entity) || (__job)->entity == (__entity))
+
#endif
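
The new pending-job iterator above is only valid while the scheduler is fully stopped (the scoped guard WARNs otherwise). A hedged sketch of how a driver's timeout/reset path might use it; my_requeue_job() and the surrounding function are hypothetical stand-ins, only the drm_sched_* helpers come from this header.

/* Hypothetical reset-path sketch; my_requeue_job() is a stand-in for
 * driver-specific handling, everything else comes from gpu_scheduler.h.
 */
#include <drm/gpu_scheduler.h>

static void my_requeue_job(struct drm_sched_job *job)
{
	/* Driver-specific resubmission would go here. */
}

static void example_reset_entity_jobs(struct drm_gpu_scheduler *sched,
				      struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;

	/* The iterator below WARNs unless the scheduler is fully stopped. */
	drm_sched_stop(sched, NULL);

	/* Walk pending jobs, restricted to @entity (pass NULL for all). */
	drm_sched_for_each_pending_job(job, sched, entity) {
		if (drm_sched_job_is_signaled(job))
			continue;		/* already completed on the hardware */
		my_requeue_job(job);		/* hypothetical driver hook */
	}

	drm_sched_start(sched, 0);
}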
diff --git a/include/trace/events/dma_buf.h b/include/trace/events/dma_buf.h
new file mode 100644
index 000000000000..35f8140095f4
--- /dev/null
+++ b/include/trace/events/dma_buf.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma_buf
+
+#if !defined(_TRACE_DMA_BUF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_BUF_H
+
+#include <linux/dma-buf.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(dma_buf,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf),
+
+ TP_STRUCT__entry(
+ __string(exp_name, dmabuf->exp_name)
+ __field(size_t, size)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(dma_buf_attach_dev,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __string(exp_name, dmabuf->exp_name)
+ __field(size_t, size)
+ __field(ino_t, ino)
+ __field(struct dma_buf_attachment *, attach)
+ __field(bool, is_dynamic)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->is_dynamic = is_dynamic;
+ __entry->attach = attach;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu attachment:%p is_dynamic=%d dev_name=%s",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->attach,
+ __entry->is_dynamic,
+ __get_str(dev_name))
+);
+
+DECLARE_EVENT_CLASS(dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd),
+
+ TP_STRUCT__entry(
+ __string(exp_name, dmabuf->exp_name)
+ __field(size_t, size)
+ __field(ino_t, ino)
+ __field(int, fd)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu fd=%d",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->fd)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_export,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap_internal,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_put,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_dynamic_attach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_detach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT(dma_buf_fd, dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd)
+);
+
+DEFINE_EVENT(dma_buf_fd, dma_buf_get,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd)
+);
+
+#endif /* _TRACE_DMA_BUF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
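
As context for the new header: these are standard event-class tracepoint definitions, so emitting them follows the usual pattern of instantiating the tracepoints in exactly one compilation unit and then calling trace_<event>(). The hook functions below are illustrative assumptions; the real call sites are not part of this diff and would live in drivers/dma-buf/dma-buf.c.

/* Illustrative sketch only; real call sites are in the dma-buf core. */
#define CREATE_TRACE_POINTS
#include <trace/events/dma_buf.h>

static void example_trace_export(struct dma_buf *dmabuf)
{
	/* Events defined on the one-argument dma_buf class ... */
	trace_dma_buf_export(dmabuf);
}

static void example_trace_install_fd(struct dma_buf *dmabuf, int fd)
{
	/* ... and on the dma_buf_fd class, which also records the fd. */
	trace_dma_buf_fd(dmabuf, fd);
}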
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index 62c917fd4f7b..9c44db2b3dcd 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -19,6 +19,14 @@ extern "C" {
#define AMDXDNA_INVALID_BO_HANDLE 0
#define AMDXDNA_INVALID_FENCE_HANDLE 0
+/*
+ * Define hardware context priority
+ */
+#define AMDXDNA_QOS_REALTIME_PRIORITY 0x100
+#define AMDXDNA_QOS_HIGH_PRIORITY 0x180
+#define AMDXDNA_QOS_NORMAL_PRIORITY 0x200
+#define AMDXDNA_QOS_LOW_PRIORITY 0x280
+
enum amdxdna_device_type {
AMDXDNA_DEV_TYPE_UNKNOWN = -1,
AMDXDNA_DEV_TYPE_KMQ,
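
One observation on the new macros above: the values appear to be ordered so that a numerically lower value denotes a higher priority (0x100 realtime down to 0x280 low). A small hedged userspace sketch mapping an application-level enum onto them; the enum and helper are hypothetical, only the AMDXDNA_QOS_* macros come from the uapi header.

/* Hypothetical userspace mapping helper; a lower numeric value means a
 * higher hardware context priority per the macro values above.
 */
#include <drm/amdxdna_accel.h>

enum example_prio { EXAMPLE_RT, EXAMPLE_HIGH, EXAMPLE_NORMAL, EXAMPLE_LOW };

static __u32 example_to_qos_priority(enum example_prio p)
{
	switch (p) {
	case EXAMPLE_RT:     return AMDXDNA_QOS_REALTIME_PRIORITY;
	case EXAMPLE_HIGH:   return AMDXDNA_QOS_HIGH_PRIORITY;
	case EXAMPLE_LOW:    return AMDXDNA_QOS_LOW_PRIORITY;
	case EXAMPLE_NORMAL:
	default:             return AMDXDNA_QOS_NORMAL_PRIORITY;
	}
}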
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index e238c6264fa1..b401ac585d6a 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -350,7 +350,7 @@ struct drm_panthor_gpu_info {
__u32 as_present;
/**
- * @select_coherency: Coherency selected for this device.
+ * @selected_coherency: Coherency selected for this device.
*
* One of drm_panthor_gpu_coherency.
*/