author		Jonathan Cameron <Jonathan.Cameron@huawei.com>	2025-09-13 15:00:48 +0100
committer	Jonathan Cameron <Jonathan.Cameron@huawei.com>	2025-09-13 15:00:48 +0100
commit		421d4487ef2ead206f57a8950ea9bdd1f7a7b39a (patch)
tree		b679546eeaa70f88b5c8d6caeb79e214efb68121 /include/linux
parent		3422b4bc606eee2ba7758ea9347c83332eeec3e3 (diff)
parent		1b237f190eb3d36f52dffe07a40b5eb210280e00 (diff)
Merge tag 'v6.17-rc3' into togreg
Linux 6.17-rc3
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blkdev.h		1
-rw-r--r--	include/linux/compiler.h	8
-rw-r--r--	include/linux/cpuhotplug.h	1
-rw-r--r--	include/linux/export.h		2
-rw-r--r--	include/linux/firewire.h	4
-rw-r--r--	include/linux/iosys-map.h	7
-rw-r--r--	include/linux/iov_iter.h	20
-rw-r--r--	include/linux/kcov.h		47
-rw-r--r--	include/linux/migrate.h		5
-rw-r--r--	include/linux/netdevice.h	5
-rw-r--r--	include/linux/netfs.h		1
-rw-r--r--	include/linux/nfs_page.h	1
-rw-r--r--	include/linux/sched.h		29
13 files changed, 56 insertions, 75 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 95886b404b16..fe1797bbec42 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -656,6 +656,7 @@ enum {
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
+ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
QUEUE_FLAG_MAX
};
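
For reference, QUEUE_FLAG_* values are bit indices into q->queue_flags, so the new flag is driven through the usual bit helpers. A minimal sketch of how a caller might test it (queue_qos_enabled is a hypothetical helper, not part of the patch):

	static inline bool queue_qos_enabled(struct request_queue *q)
	{
		/* set/cleared elsewhere via blk_queue_flag_set()/_clear() */
		return test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags);
	}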
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6f04a1d8c720..64ff73c533e5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -288,14 +288,6 @@ static inline void *offset_to_ptr(const int *off)
#define __ADDRESSABLE(sym) \
___ADDRESSABLE(sym, __section(".discard.addressable"))
-#define __ADDRESSABLE_ASM(sym) \
- .pushsection .discard.addressable,"aw"; \
- .align ARCH_SEL(8,4); \
- ARCH_SEL(.quad, .long) __stringify(sym); \
- .popsection;
-
-#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
-
/*
* This returns a constant expression while determining if an argument is
* a constant expression, most importantly without evaluating the argument.
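
Only the asm-side variants are removed; the C-side __ADDRESSABLE() above remains. A hedged sketch of its typical use (my_func is hypothetical): it plants a pointer to the symbol in the discarded .discard.addressable section so the toolchain keeps the symbol even when nothing references it directly.

	/* Hypothetical: keep my_func emitted even if it is only reached
	 * via inline asm or runtime patching. */
	static void my_func(void)
	{
	}
	__ADDRESSABLE(my_func);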
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index edfa61d80702..62cd7b35a29c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -168,6 +168,7 @@ enum cpuhp_state {
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
+ CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_REALTEK_TIMER_STARTING,
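
New *_STARTING states like this are claimed by the matching clockevents driver at init time. A sketch of the usual registration (callback names are illustrative, not the actual LoongArch driver code):

	static int loongarch_timer_starting_cpu(unsigned int cpu)
	{
		/* program and enable this CPU's timer */
		return 0;
	}

	static int loongarch_timer_dying_cpu(unsigned int cpu)
	{
		/* quiesce this CPU's timer on hot-unplug */
		return 0;
	}

	static int __init loongarch_timer_hp_init(void)
	{
		return cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
					 "clockevents/loongarch/timer:starting",
					 loongarch_timer_starting_cpu,
					 loongarch_timer_dying_cpu);
	}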
diff --git a/include/linux/export.h b/include/linux/export.h
index f35d03b4113b..a686fd0ba406 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -91,6 +91,6 @@
#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns)
#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns)
-#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
+#define EXPORT_SYMBOL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
#endif /* _LINUX_EXPORT_H */
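
Note that the expansion still passes "GPL", so the renamed macro remains a GPL-only export; only the name drops the _GPL suffix. Usage sketch (symbol and module names are illustrative):

	int my_core_helper(void)
	{
		return 0;
	}
	/* visible only to the listed modules, still GPL-only */
	EXPORT_SYMBOL_FOR_MODULES(my_core_helper, "mod_a,mod_b");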
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index cceb70415ed2..d38c6e538e5c 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -341,7 +341,11 @@ struct fw_address_handler {
u64 length;
fw_address_callback_t address_callback;
void *callback_data;
+
+ // Only for core functions.
struct list_head link;
+ struct kref kref;
+ struct completion done;
};
struct fw_address_region {
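
The new kref/completion pair is reserved for the core ("Only for core functions"). A sketch of the teardown pattern such fields typically enable (helper names are hypothetical): the final kref_put() fires ->done, letting unregistration wait until all in-flight address handler callbacks have finished.

	static void handler_release(struct kref *kref)
	{
		struct fw_address_handler *h =
			container_of(kref, struct fw_address_handler, kref);

		complete(&h->done);
	}

	static void handler_wait_unused(struct fw_address_handler *h)
	{
		kref_put(&h->kref, handler_release);	/* drop the base ref */
		wait_for_completion(&h->done);		/* wait for users */
	}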
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index 4696abfd311c..3e85afe794c0 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
*/
static inline void iosys_map_clear(struct iosys_map *map)
{
- if (map->is_iomem) {
- map->vaddr_iomem = NULL;
- map->is_iomem = false;
- } else {
- map->vaddr = NULL;
- }
+ memset(map, 0, sizeof(*map));
}
/**
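
The memset() is equivalent to the removed branch because both pointers live in a single union, so zeroing the whole struct clears whichever pointer was active together with is_iomem in one store. For reference, the struct shape from earlier in iosys-map.h:

	struct iosys_map {
		union {
			void __iomem *vaddr_iomem;
			void *vaddr;
		};
		bool is_iomem;
	};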
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index c4aa58032faf..f9a17fbbd398 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -160,7 +160,7 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
do {
struct folio *folio = folioq_folio(folioq, slot);
- size_t part, remain, consumed;
+ size_t part, remain = 0, consumed;
size_t fsize;
void *base;
@@ -168,14 +168,16 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
break;
fsize = folioq_folio_size(folioq, slot);
- base = kmap_local_folio(folio, skip);
- part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
- remain = step(base, progress, part, priv, priv2);
- kunmap_local(base);
- consumed = part - remain;
- len -= consumed;
- progress += consumed;
- skip += consumed;
+ if (skip < fsize) {
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ }
if (skip >= fsize) {
skip = 0;
slot++;
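
The new guard keeps kmap_local_folio() from being handed an offset at or past the folio's size; a fully consumed slot is now advanced without mapping, and remain is pre-initialised since the step may no longer run. For context, a step callback has the shape implied by the call above and returns how many bytes it could not process (sketch; signature inferred from the call site):

	/* Sketch: copy each mapped chunk into a flat buffer passed via priv. */
	static size_t copy_step(void *base, size_t progress, size_t len,
				void *priv, void *priv2)
	{
		memcpy((char *)priv + progress, base, len);
		return 0;	/* 0 bytes remaining: everything consumed */
	}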
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 75a2fb8b16c3..0143358874b0 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -57,47 +57,21 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
- * workaround for KCOV's lack of nested remote coverage sections support.
- *
- * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
- *
- * kcov_remote_start_usb_softirq():
- *
- * 1. Only collects coverage when called in the softirq context. This allows
- * avoiding nested remote coverage collection sections in the task context.
- * For example, USB/IP calls usb_hcd_giveback_urb() in the task context
- * within an existing remote coverage collection section. Thus, KCOV should
- * not attempt to start collecting coverage within the coverage collection
- * section in __usb_hcd_giveback_urb() in this case.
- *
- * 2. Disables interrupts for the duration of the coverage collection section.
- * This allows avoiding nested remote coverage collection sections in the
- * softirq context (a softirq might occur during the execution of a work in
- * the BH workqueue, which runs with in_serving_softirq() > 0).
- * For example, usb_giveback_urb_bh() runs in the BH workqueue with
- * interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
- * the middle of its remote coverage collection section, and the interrupt
- * handler might invoke __usb_hcd_giveback_urb() again.
+ * workaround for kcov's lack of nested remote coverage sections support in
+ * task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
*/
-static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+static inline void kcov_remote_start_usb_softirq(u64 id)
{
- unsigned long flags = 0;
-
- if (in_serving_softirq()) {
- local_irq_save(flags);
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_start_usb(id);
- }
-
- return flags;
}
-static inline void kcov_remote_stop_softirq(unsigned long flags)
+static inline void kcov_remote_stop_softirq(void)
{
- if (in_serving_softirq()) {
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_stop();
- local_irq_restore(flags);
- }
}
#ifdef CONFIG_64BIT
@@ -131,11 +105,8 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
-static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
-{
- return 0;
-}
-static inline void kcov_remote_stop_softirq(unsigned long flags) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
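
Callers lose the flags plumbing: the softirq variants no longer save and restore IRQ state, so the start/stop pair shrinks to plain calls. A sketch of an updated caller in the spirit of the USB HCD giveback path (the wrapper function is illustrative):

	static void giveback_urb(struct urb *urb)
	{
		/* the USB core keys remote coverage by bus number */
		kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
		__usb_hcd_giveback_urb(urb);
		kcov_remote_stop_softirq();
	}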
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index acadd41e0b5c..9009e27b5f44 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -79,6 +79,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
#else
@@ -100,6 +101,10 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_MIGRATION */
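
A hedged sketch of how a page owner might use the new hook, registering its movable_operations for a page type at init and dropping them on exit (the callbacks and PGTY_* value are illustrative, and it is assumed here that passing NULL unregisters):

	static bool my_isolate_page(struct page *page, isolate_mode_t mode)
	{
		return true;	/* claim the page for migration */
	}

	static int my_migrate_page(struct page *dst, struct page *src,
				   enum migrate_mode mode)
	{
		return 0;	/* copy contents, transfer state */
	}

	static void my_putback_page(struct page *page)
	{
	}

	static const struct movable_operations my_mops = {
		.isolate_page	= my_isolate_page,
		.migrate_page	= my_migrate_page,
		.putback_page	= my_putback_page,
	};

	static int __init my_init(void)
	{
		return set_movable_ops(&my_mops, PGTY_zsmalloc);
	}

	static void __exit my_exit(void)
	{
		set_movable_ops(NULL, PGTY_zsmalloc);
	}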
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5e5de4b0a433..f3a3b761abfb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2071,6 +2071,8 @@ enum netdev_reg_state {
* @max_pacing_offload_horizon: max EDT offload horizon in nsec.
* @napi_config: An array of napi_config structures containing per-NAPI
* settings.
+ * @num_napi_configs: number of allocated NAPI config structs,
+ * always >= max(num_rx_queues, num_tx_queues).
* @gro_flush_timeout: timeout for GRO layer in NAPI
* @napi_defer_hard_irqs: If not zero, provides a counter that would
* allow to avoid NIC hard IRQ, on busy queues.
@@ -2482,8 +2484,9 @@ struct net_device {
u64 max_pacing_offload_horizon;
struct napi_config *napi_config;
- unsigned long gro_flush_timeout;
+ u32 num_napi_configs;
u32 napi_defer_hard_irqs;
+ unsigned long gro_flush_timeout;
/**
* @up: copy of @state's IFF_UP, but safe to read with just @lock.
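
The documented invariant (num_napi_configs always >= max(num_rx_queues, num_tx_queues)) is what makes per-queue indexing into napi_config safe; an illustrative bounds check (queue_napi_config is hypothetical):

	static struct napi_config *queue_napi_config(struct net_device *dev,
						     unsigned int qid)
	{
		if (WARN_ON_ONCE(qid >= dev->num_napi_configs))
			return NULL;
		return &dev->napi_config[qid];
	}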
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 185bd8196503..98c96d649bf9 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -150,6 +150,7 @@ struct netfs_io_stream {
bool active; /* T if stream is active */
bool need_retry; /* T if this stream needs retrying */
bool failed; /* T if this stream failed */
+ bool transferred_valid; /* T if ->transferred is valid */
};
/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 169b4ae30ff4..9aed39abc94b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -160,6 +160,7 @@ extern void nfs_join_page_group(struct nfs_page *head,
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
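
Following the kernel's usual *_locked naming, the new variant presumably expects the page group lock to already be held. A hedged usage sketch (error handling elided):

	static void example_sync(struct nfs_page *head, struct nfs_page *req)
	{
		nfs_page_group_lock(head);
		if (nfs_page_group_sync_on_bit_locked(req, PG_WB_END)) {
			/* every request in the group has seen the bit */
		}
		nfs_page_group_unlock(head);
	}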
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b272382673d..f8188b833350 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2152,6 +2152,8 @@ static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
WARN_ON_ONCE(!m);
/* The task should only be setting itself as blocked */
WARN_ON_ONCE(p != current);
@@ -2162,8 +2164,8 @@ static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
* with a different mutex. Note, setting it to the same
* lock repeatedly is ok.
*/
- WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
- p->blocked_on = m;
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ WRITE_ONCE(p->blocked_on, m);
}
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
@@ -2174,16 +2176,19 @@ static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- WARN_ON_ONCE(!m);
- /* Currently we serialize blocked_on under the mutex::wait_lock */
- lockdep_assert_held_once(&m->wait_lock);
- /*
- * There may be cases where we re-clear already cleared
- * blocked_on relationships, but make sure we are not
- * clearing the relationship with a different lock.
- */
- WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);
- p->blocked_on = NULL;
+ if (m) {
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+ * There may be cases where we re-clear already cleared
+ * blocked_on relationships, but make sure we are not
+ * clearing the relationship with a different lock.
+ */
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ }
+ WRITE_ONCE(p->blocked_on, NULL);
}
static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
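
The common thread in these hunks is a snapshot-then-check pattern, with accesses marked READ_ONCE()/WRITE_ONCE(), the usual treatment when a field may also be read locklessly. Illustrative fragment:

	/* Racy double read the hunks remove (two loads of p->blocked_on
	 * could observe two different values):
	 *
	 *	WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
	 *
	 * Self-consistent replacement, as in the diff above: */
	struct mutex *blocked_on = READ_ONCE(p->blocked_on);

	WARN_ON_ONCE(blocked_on && blocked_on != m);
	WRITE_ONCE(p->blocked_on, m);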