Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_drivers.h | 2
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/actbl1.h | 257
-rw-r--r-- include/acpi/actbl2.h | 194
-rw-r--r-- include/acpi/actbl3.h | 1
-rw-r--r-- include/acpi/acuuid.h | 6
-rw-r--r-- include/acpi/cppc_acpi.h | 11
-rw-r--r-- include/acpi/ghes.h | 2
-rw-r--r-- include/acpi/processor.h | 34
-rw-r--r-- include/asm-generic/Kbuild | 1
-rw-r--r-- include/asm-generic/audit_change_attr.h | 3
-rw-r--r-- include/asm-generic/audit_read.h | 6
-rw-r--r-- include/asm-generic/rqspinlock.h | 2
-rw-r--r-- include/asm-generic/tlb.h | 77
-rw-r--r-- include/asm-generic/topology.h | 8
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 3
-rw-r--r-- include/crypto/aes.h | 278
-rw-r--r-- include/crypto/df_sp80090a.h | 2
-rw-r--r-- include/crypto/gcm.h | 2
-rw-r--r-- include/crypto/internal/acompress.h | 7
-rw-r--r-- include/crypto/internal/engine.h | 2
-rw-r--r-- include/crypto/internal/skcipher.h | 7
-rw-r--r-- include/crypto/mldsa.h | 62
-rw-r--r-- include/crypto/nh.h | 52
-rw-r--r-- include/crypto/nhpoly1305.h | 74
-rw-r--r-- include/crypto/public_key.h | 6
-rw-r--r-- include/cxl/event.h | 22
-rw-r--r-- include/drm/bridge/dw_hdmi_qp.h | 1
-rw-r--r-- include/drm/display/drm_dp_helper.h | 57
-rw-r--r-- include/drm/drm_atomic_helper.h | 22
-rw-r--r-- include/drm/drm_bridge.h | 249
-rw-r--r-- include/drm/drm_pagemap.h | 19
-rw-r--r-- include/dt-bindings/clock/google,gs101.h | 36
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-msm8917.h | 1
-rw-r--r-- include/dt-bindings/clock/qcom,x1e80100-gcc.h | 3
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h | 3
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h | 3
-rw-r--r-- include/dt-bindings/power/qcom,rpmhpd.h | 1
-rw-r--r-- include/dt-bindings/reset/spacemit,k3-resets.h | 171
-rw-r--r-- include/dt-bindings/thermal/mediatek,lvts-thermal.h | 29
-rw-r--r-- include/hyperv/hvgdk_mini.h | 7
-rw-r--r-- include/hyperv/hvhdk.h | 47
-rw-r--r-- include/kunit/test.h | 3
-rw-r--r-- include/linux/acpi.h | 1
-rw-r--r-- include/linux/acpi_iort.h | 11
-rw-r--r-- include/linux/atomic/atomic-arch-fallback.h | 18
-rw-r--r-- include/linux/atomic/atomic-instrumented.h | 26
-rw-r--r-- include/linux/atomic/atomic-long.h | 10
-rw-r--r-- include/linux/audit.h | 25
-rw-r--r-- include/linux/audit_arch.h | 7
-rw-r--r-- include/linux/bio.h | 33
-rw-r--r-- include/linux/bit_spinlock.h | 24
-rw-r--r-- include/linux/bitfield.h | 5
-rw-r--r-- include/linux/blk-crypto.h | 32
-rw-r--r-- include/linux/blk-integrity.h | 6
-rw-r--r-- include/linux/blk-mq-dma.h | 2
-rw-r--r-- include/linux/blk-mq.h | 4
-rw-r--r-- include/linux/blk_types.h | 7
-rw-r--r-- include/linux/blkdev.h | 24
-rw-r--r-- include/linux/bpf-cgroup.h | 4
-rw-r--r-- include/linux/bpf.h | 178
-rw-r--r-- include/linux/bpf_local_storage.h | 29
-rw-r--r-- include/linux/bpf_mprog.h | 10
-rw-r--r-- include/linux/bpf_verifier.h | 14
-rw-r--r-- include/linux/btf.h | 9
-rw-r--r-- include/linux/buildid.h | 3
-rw-r--r-- include/linux/can/can-ml.h | 24
-rw-r--r-- include/linux/can/dev.h | 8
-rw-r--r-- include/linux/ceph/ceph_fs.h | 6
-rw-r--r-- include/linux/cgroup-defs.h | 25
-rw-r--r-- include/linux/cleanup.h | 58
-rw-r--r-- include/linux/cma.h | 9
-rw-r--r-- include/linux/compiler-context-analysis.h | 436
-rw-r--r-- include/linux/compiler.h | 12
-rw-r--r-- include/linux/compiler_types.h | 99
-rw-r--r-- include/linux/console.h | 4
-rw-r--r-- include/linux/cper.h | 3
-rw-r--r-- include/linux/cpufreq.h | 5
-rw-r--r-- include/linux/cpuhplock.h | 1
-rw-r--r-- include/linux/cpuset.h | 8
-rw-r--r-- include/linux/cred.h | 1
-rw-r--r-- include/linux/debugfs.h | 12
-rw-r--r-- include/linux/device/driver.h | 9
-rw-r--r-- include/linux/device_cgroup.h | 2
-rw-r--r-- include/linux/efi.h | 9
-rw-r--r-- include/linux/energy_model.h | 2
-rw-r--r-- include/linux/entry-common.h | 167
-rw-r--r-- include/linux/exportfs.h | 33
-rw-r--r-- include/linux/filelock.h | 19
-rw-r--r-- include/linux/filter.h | 1
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h | 30
-rw-r--r-- include/linux/firmware/xlnx-zynqmp-crypto.h | 119
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 14
-rw-r--r-- include/linux/fortify-string.h | 8
-rw-r--r-- include/linux/fs.h | 88
-rw-r--r-- include/linux/fs/super_types.h | 8
-rw-r--r-- include/linux/fserror.h | 75
-rw-r--r-- include/linux/ftrace.h | 33
-rw-r--r-- include/linux/ftrace_regs.h | 25
-rw-r--r-- include/linux/getcpu.h | 19
-rw-r--r-- include/linux/hisi_acc_qm.h | 15
-rw-r--r-- include/linux/hrtimer.h | 17
-rw-r--r-- include/linux/hrtimer_defs.h | 20
-rw-r--r-- include/linux/hugetlb.h | 17
-rw-r--r-- include/linux/hw_random.h | 2
-rw-r--r-- include/linux/i3c/device.h | 22
-rw-r--r-- include/linux/i3c/master.h | 11
-rw-r--r-- include/linux/iio/iio-opaque.h | 2
-rw-r--r-- include/linux/init_syscalls.h | 1
-rw-r--r-- include/linux/initrd.h | 2
-rw-r--r-- include/linux/interrupt.h | 26
-rw-r--r-- include/linux/io_uring.h | 14
-rw-r--r-- include/linux/io_uring_types.h | 42
-rw-r--r-- include/linux/iomap.h | 25
-rw-r--r-- include/linux/irq.h | 15
-rw-r--r-- include/linux/irqchip/arm-gic-v5.h | 8
-rw-r--r-- include/linux/irqchip/irq-renesas-rzt2h.h | 23
-rw-r--r-- include/linux/irqdesc.h | 17
-rw-r--r-- include/linux/irqdomain.h | 30
-rw-r--r-- include/linux/jbd2.h | 3
-rw-r--r-- include/linux/kasan.h | 14
-rw-r--r-- include/linux/kfence.h | 1
-rw-r--r-- include/linux/kref.h | 2
-rw-r--r-- include/linux/kthread.h | 1
-rw-r--r-- include/linux/ktime.h | 2
-rw-r--r-- include/linux/list_bl.h | 2
-rw-r--r-- include/linux/livepatch.h | 3
-rw-r--r-- include/linux/local_lock.h | 59
-rw-r--r-- include/linux/local_lock_internal.h | 72
-rw-r--r-- include/linux/lockdep.h | 12
-rw-r--r-- include/linux/lockref.h | 4
-rw-r--r-- include/linux/lsm_hooks.h | 4
-rw-r--r-- include/linux/mailbox/mtk-cmdq-mailbox.h | 19
-rw-r--r-- include/linux/memcontrol.h | 23
-rw-r--r-- include/linux/memfd.h | 6
-rw-r--r-- include/linux/memremap.h | 9
-rw-r--r-- include/linux/mfd/wm8350/core.h | 2
-rw-r--r-- include/linux/mm.h | 39
-rw-r--r-- include/linux/mm_types.h | 19
-rw-r--r-- include/linux/mmu_context.h | 2
-rw-r--r-- include/linux/mmzone.h | 9
-rw-r--r-- include/linux/module.h | 18
-rw-r--r-- include/linux/moduleparam.h | 8
-rw-r--r-- include/linux/msi.h | 16
-rw-r--r-- include/linux/mtd/jedec.h | 2
-rw-r--r-- include/linux/mtd/nand-ecc-sw-hamming.h | 2
-rw-r--r-- include/linux/mtd/ndfc.h | 2
-rw-r--r-- include/linux/mtd/onfi.h | 2
-rw-r--r-- include/linux/mtd/platnand.h | 2
-rw-r--r-- include/linux/mtd/rawnand.h | 2
-rw-r--r-- include/linux/mutex.h | 40
-rw-r--r-- include/linux/mutex_types.h | 4
-rw-r--r-- include/linux/netdevice.h | 3
-rw-r--r-- include/linux/nfs_fs.h | 1
-rw-r--r-- include/linux/nmi.h | 1
-rw-r--r-- include/linux/ns/ns_common_types.h | 4
-rw-r--r-- include/linux/nubus.h | 3
-rw-r--r-- include/linux/of_irq.h | 41
-rw-r--r-- include/linux/oid_registry.h | 5
-rw-r--r-- include/linux/overflow.h | 42
-rw-r--r-- include/linux/pagemap.h | 11
-rw-r--r-- include/linux/pci-acpi.h | 3
-rw-r--r-- include/linux/pci-ide.h | 4
-rw-r--r-- include/linux/pci.h | 15
-rw-r--r-- include/linux/percpu-rwsem.h | 1
-rw-r--r-- include/linux/perf_event.h | 37
-rw-r--r-- include/linux/platform_data/hwmon-s3c.h | 36
-rw-r--r-- include/linux/platform_data/mipi-i3c-hci.h | 15
-rw-r--r-- include/linux/plist.h | 2
-rw-r--r-- include/linux/pm.h | 2
-rw-r--r-- include/linux/posix_acl_xattr.h | 5
-rw-r--r-- include/linux/rcupdate.h | 121
-rw-r--r-- include/linux/rcupdate_trace.h | 166
-rw-r--r-- include/linux/refcount.h | 6
-rw-r--r-- include/linux/resctrl.h | 57
-rw-r--r-- include/linux/resctrl_types.h | 11
-rw-r--r-- include/linux/restart_block.h | 4
-rw-r--r-- include/linux/rhashtable.h | 16
-rw-r--r-- include/linux/rseq.h | 11
-rw-r--r-- include/linux/rseq_entry.h | 192
-rw-r--r-- include/linux/rseq_types.h | 38
-rw-r--r-- include/linux/rslib.h | 2
-rw-r--r-- include/linux/rwlock.h | 19
-rw-r--r-- include/linux/rwlock_api_smp.h | 43
-rw-r--r-- include/linux/rwlock_rt.h | 43
-rw-r--r-- include/linux/rwlock_types.h | 10
-rw-r--r-- include/linux/rwsem.h | 78
-rw-r--r-- include/linux/sched.h | 33
-rw-r--r-- include/linux/sched/cputime.h | 18
-rw-r--r-- include/linux/sched/isolation.h | 16
-rw-r--r-- include/linux/sched/mm.h | 1
-rw-r--r-- include/linux/sched/signal.h | 16
-rw-r--r-- include/linux/sched/task.h | 6
-rw-r--r-- include/linux/sched/wake_q.h | 3
-rw-r--r-- include/linux/scmi_imx_protocol.h | 2
-rw-r--r-- include/linux/screen_info.h | 2
-rw-r--r-- include/linux/seqlock.h | 57
-rw-r--r-- include/linux/seqlock_types.h | 5
-rw-r--r-- include/linux/skbuff.h | 12
-rw-r--r-- include/linux/skmsg.h | 70
-rw-r--r-- include/linux/slab.h | 106
-rw-r--r-- include/linux/soc/airoha/airoha_offload.h | 12
-rw-r--r-- include/linux/soc/apple/rtkit.h | 7
-rw-r--r-- include/linux/soc/mediatek/mtk-cmdq.h | 93
-rw-r--r-- include/linux/soc/qcom/llcc-qcom.h | 4
-rw-r--r-- include/linux/soc/qcom/mdt_loader.h | 22
-rw-r--r-- include/linux/soc/qcom/ubwc.h | 1
-rw-r--r-- include/linux/spinlock.h | 119
-rw-r--r-- include/linux/spinlock_api_smp.h | 34
-rw-r--r-- include/linux/spinlock_api_up.h | 112
-rw-r--r-- include/linux/spinlock_rt.h | 36
-rw-r--r-- include/linux/spinlock_types.h | 10
-rw-r--r-- include/linux/spinlock_types_raw.h | 5
-rw-r--r-- include/linux/srcu.h | 73
-rw-r--r-- include/linux/srcutiny.h | 6
-rw-r--r-- include/linux/srcutree.h | 10
-rw-r--r-- include/linux/syscalls.h | 4
-rw-r--r-- include/linux/sysfb.h | 23
-rw-r--r-- include/linux/tee_core.h | 9
-rw-r--r-- include/linux/tee_drv.h | 12
-rw-r--r-- include/linux/textsearch.h | 1
-rw-r--r-- include/linux/thread_info.h | 16
-rw-r--r-- include/linux/tick.h | 2
-rw-r--r-- include/linux/timecounter.h | 31
-rw-r--r-- include/linux/tnum.h | 5
-rw-r--r-- include/linux/trace_recursion.h | 9
-rw-r--r-- include/linux/tsm.h | 3
-rw-r--r-- include/linux/types.h | 5
-rw-r--r-- include/linux/uio.h | 3
-rw-r--r-- include/linux/uio_driver.h | 2
-rw-r--r-- include/linux/unwind_user.h | 18
-rw-r--r-- include/linux/uprobes.h | 1
-rw-r--r-- include/linux/usb/quirks.h | 3
-rw-r--r-- include/linux/util_macros.h | 2
-rw-r--r-- include/linux/vmstat.h | 2
-rw-r--r-- include/linux/workqueue.h | 2
-rw-r--r-- include/linux/ww_mutex.h | 21
-rw-r--r-- include/linux/xattr.h | 2
-rw-r--r-- include/media/v4l2-ioctl.h | 15
-rw-r--r-- include/net/bonding.h | 13
-rw-r--r-- include/net/cfg80211.h | 3
-rw-r--r-- include/net/dropreason-core.h | 6
-rw-r--r-- include/net/hotdata.h | 1
-rw-r--r-- include/net/ip_tunnels.h | 13
-rw-r--r-- include/net/nfc/nfc.h | 2
-rw-r--r-- include/scsi/scsi_eh.h | 6
-rw-r--r-- include/soc/spacemit/ccu.h | 21
-rw-r--r-- include/soc/spacemit/k1-syscon.h | 12
-rw-r--r-- include/soc/spacemit/k3-syscon.h | 273
-rw-r--r-- include/soc/tegra/pmc.h | 60
-rw-r--r-- include/sound/pcm.h | 2
-rw-r--r-- include/trace/events/btrfs.h | 3
-rw-r--r-- include/trace/events/dma.h | 25
-rw-r--r-- include/trace/events/erofs.h | 10
-rw-r--r-- include/trace/events/rxrpc.h | 4
-rw-r--r-- include/trace/events/writeback.h | 6
-rw-r--r-- include/trace/misc/nfs.h | 2
-rw-r--r-- include/uapi/asm-generic/errno.h | 2
-rw-r--r-- include/uapi/asm-generic/unistd.h | 5
-rw-r--r-- include/uapi/linux/blkzoned.h | 6
-rw-r--r-- include/uapi/linux/bpf.h | 28
-rw-r--r-- include/uapi/linux/btrfs.h | 1
-rw-r--r-- include/uapi/linux/btrfs_tree.h | 34
-rw-r--r-- include/uapi/linux/comedi.h | 2
-rw-r--r-- include/uapi/linux/dev_energymodel.h | 82
-rw-r--r-- include/uapi/linux/energy_model.h | 63
-rw-r--r-- include/uapi/linux/ext4.h | 2
-rw-r--r-- include/uapi/linux/if_alg.h | 2
-rw-r--r-- include/uapi/linux/io_uring.h | 22
-rw-r--r-- include/uapi/linux/io_uring/bpf_filter.h | 62
-rw-r--r-- include/uapi/linux/kvm.h | 3
-rw-r--r-- include/uapi/linux/landlock.h | 37
-rw-r--r-- include/uapi/linux/magic.h | 1
-rw-r--r-- include/uapi/linux/media/arm/mali-c55-config.h | 9
-rw-r--r-- include/uapi/linux/mount.h | 13
-rw-r--r-- include/uapi/linux/nfs.h | 1
-rw-r--r-- include/uapi/linux/nilfs2_api.h | 4
-rw-r--r-- include/uapi/linux/nilfs2_ondisk.h | 163
-rw-r--r-- include/uapi/linux/nl80211.h | 5
-rw-r--r-- include/uapi/linux/perf_event.h | 29
-rw-r--r-- include/uapi/linux/prctl.h | 10
-rw-r--r-- include/uapi/linux/rseq.h | 41
-rw-r--r-- include/uapi/linux/stddef.h | 4
-rw-r--r-- include/uapi/linux/sysctl.h | 1
-rw-r--r-- include/uapi/linux/ublk_cmd.h | 121
-rw-r--r-- include/uapi/linux/xattr.h | 2
-rw-r--r-- include/vdso/gettime.h | 1
-rw-r--r-- include/vdso/unaligned.h | 41
-rw-r--r-- include/video/edid.h | 4
-rw-r--r-- include/xen/xen.h | 2
290 files changed, 6154 insertions, 1584 deletions
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index b14d165632e7..402b97d12138 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -51,7 +51,7 @@
int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
- int *polarity, char **name);
+ int *polarity, char **name, u32 *gsi);
int acpi_pci_link_free_irq(acpi_handle handle);
/* ACPI PCI Device Binding */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e65a2afe9250..49d1749f30bb 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20250807
+#define ACPI_CA_VERSION 0x20251212
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7f35eb0e8458..4e15583e0d25 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -37,6 +37,7 @@
#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
+#define ACPI_SIG_DTPR "DTPR" /* DMA TXT Protection Ranges table */
#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */
#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */
@@ -1001,6 +1002,262 @@ struct acpi_drtm_dps_id {
/*******************************************************************************
*
+ * DTPR - DMA TXT Protection Ranges Table
+ * Version 1
+ *
+ * Conforms to "Intel® Trusted Execution Technology (Intel® TXT) DMA Protection
+ * Ranges",
+ * Revision 0.73, August 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_dtpr {
+ struct acpi_table_header header;
+ u32 flags; /* 36 */
+ u32 ins_cnt;
+};
+
+struct acpi_tpr_array {
+ u64 base;
+};
+
+struct acpi_tpr_instance {
+ u32 flags;
+ u32 tpr_cnt;
+};
+
+struct acpi_tpr_aux_sr {
+ u32 srl_cnt;
+};
+
+/*
+ * TPRn_BASE (ACPI_TPRN_BASE_REG)
+ *
+ * Specifies the start address of the TPRn region. The TPR region address and
+ * size must have 1MB resolution. These bits are compared with the result of
+ * TPRn_LIMIT[63:20], which is applied to the incoming address, to
+ * determine if an access falls within the TPRn defined region.
+ *
+ * Minimal TPRn_Base resolution is 1MB. Applied to the incoming address, to
+ * determine if an access falls within the TPRn defined region. Width is
+ * determined by the bus width, which can be obtained via CPUID
+ * function 0x80000008.
+ */
+
+typedef u64 ACPI_TPRN_BASE_REG;
+
+/* TPRn_BASE Register Bit Masks */
+
+/* Bit 3 - RW: access: 1 == RO, 0 == RW register (for TPR must be RW) */
+#define ACPI_TPRN_BASE_RW_SHIFT 3
+
+#define ACPI_TPRN_BASE_RW_MASK ((u64) 1 << ACPI_TPRN_BASE_RW_SHIFT)
+
+/*
+ * Bit 4 - Enable: 0 – TPRn address range enabled;
+ * 1 – TPRn address range disabled.
+ */
+#define ACPI_TPRN_BASE_ENABLE_SHIFT 4
+
+#define ACPI_TPRN_BASE_ENABLE_MASK ((u64) 1 << ACPI_TPRN_BASE_ENABLE_SHIFT)
+
+/* Bits 63:20 - tpr_base_rw */
+#define ACPI_TPRN_BASE_ADDR_SHIFT 20
+
+#define ACPI_TPRN_BASE_ADDR_MASK ((u64) 0xFFFFFFFFFFF << \
+ ACPI_TPRN_BASE_ADDR_SHIFT)
+
+/* TPRn_BASE Register Bit Handlers */
+
+/*
+ * GET_TPRN_BASE_RW:
+ *
+ * Read RW bit from TPRn Base register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns RW bit value (u64).
+ */
+#define GET_TPRN_BASE_RW(reg) (((u64) reg & ACPI_TPRN_BASE_RW_MASK) >> \
+ ACPI_TPRN_BASE_RW_SHIFT)
+
+/*
+ * GET_TPRN_BASE_ENABLE:
+ *
+ * Read Enable bit from TPRn Base register - bit 4.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns Enable bit value (u64).
+ */
+#define GET_TPRN_BASE_ENABLE(reg) (((u64) reg & ACPI_TPRN_BASE_ENABLE_MASK) \
+ >> ACPI_TPRN_BASE_ENABLE_SHIFT)
+
+/*
+ * GET_TPRN_BASE_ADDR:
+ *
+ * Read TPRn Base Register address from bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns TPRn Base Register address (u64).
+ */
+#define GET_TPRN_BASE_ADDR(reg) (((u64) reg & ACPI_TPRN_BASE_ADDR_MASK) \
+ >> ACPI_TPRN_BASE_ADDR_SHIFT)
+
+/*
+ * SET_TPRN_BASE_RW:
+ *
+ * Set RW bit in TPRn Base register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents RW value to be set (u64))
+ */
+#define SET_TPRN_BASE_RW(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_RW_SHIFT, \
+ ACPI_TPRN_BASE_RW_MASK, val);
+
+/*
+ * SET_TPRN_BASE_ENABLE:
+ *
+ * Set Enable bit in TPRn Base register - bit 4.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents Enable value to be set (u64))
+ */
+#define SET_TPRN_BASE_ENABLE(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_ENABLE_SHIFT, \
+ ACPI_TPRN_BASE_ENABLE_MASK, val);
+
+/*
+ * SET_TPRN_BASE_ADDR:
+ *
+ * Set TPRn Base Register address - bits 63:20
+ *
+ * Input
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents address value to be set (u64))
+ */
+#define SET_TPRN_BASE_ADDR(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_ADDR_SHIFT, \
+ ACPI_TPRN_BASE_ADDR_MASK, val);
+
+/*
+ * TPRn_LIMIT
+ *
+ * This register defines an isolated region of memory that can be enabled
+ * to prohibit certain system agents from accessing memory. When an agent
+ * sends a request upstream, whether snooped or not, a TPR prevents that
+ * transaction from changing the state of memory.
+ *
+ * Minimal TPRn_Limit resolution is 1MB. Width is determined by a bus width.
+ */
+
+typedef u64 ACPI_TPRN_LIMIT_REG;
+
+/* TPRn_LIMIT Register Bit Masks */
+
+/* Bit 3 - RW: access: 1 == RO, 0 == RW register (for TPR must be RW) */
+#define ACPI_TPRN_LIMIT_RW_SHIFT 3
+
+#define ACPI_TPRN_LIMIT_RW_MASK ((u64) 1 << ACPI_TPRN_LIMIT_RW_SHIFT)
+
+/* Bits 63:20 - tpr_limit_rw */
+#define ACPI_TPRN_LIMIT_ADDR_SHIFT 20
+
+#define ACPI_TPRN_LIMIT_ADDR_MASK ((u64) 0xFFFFFFFFFFF << \
+ ACPI_TPRN_LIMIT_ADDR_SHIFT)
+
+/* TPRn_LIMIT Register Bit Handlers */
+
+/*
+ * GET_TPRN_LIMIT_RW:
+ *
+ * Read RW bit from TPRn Limit register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ *
+ * Output:
+ *
+ * Returns RW bit value (u64).
+ */
+#define GET_TPRN_LIMIT_RW(reg) (((u64) reg & ACPI_TPRN_LIMIT_RW_MASK) \
+ >> ACPI_TPRN_LIMIT_RW_SHIFT)
+
+/*
+ * GET_TPRN_LIMIT_ADDR:
+ *
+ * Read TPRn Limit Register address from bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ *
+ * Output:
+ *
+ * Returns TPRn Limit Register address (u64).
+ */
+#define GET_TPRN_LIMIT_ADDR(reg) (((u64) reg & ACPI_TPRN_LIMIT_ADDR_MASK) \
+ >> ACPI_TPRN_LIMIT_ADDR_SHIFT)
+
+/*
+ * SET_TPRN_LIMIT_RW:
+ *
+ * Set RW bit in TPRn Limit register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ * - val (represents RW value to be set (u64))
+ */
+#define SET_TPRN_LIMIT_RW(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_LIMIT_RW_SHIFT, \
+ ACPI_TPRN_LIMIT_RW_MASK, val);
+
+/*
+ * SET_TPRN_LIMIT_ADDR:
+ *
+ * Set TPRn Limit Register address - bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ * - val (represents address value to be set (u64))
+ */
+#define SET_TPRN_LIMIT_ADDR(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_LIMIT_ADDR_SHIFT, \
+ ACPI_TPRN_LIMIT_ADDR_MASK, val);
+
+/*
+ * SERIALIZE_REQUEST
+ *
+ * This register is used to request serialization of non-coherent DMA
+ * transactions. The OS shall issue it before changing TPR settings
+ * (base / size).
+ */
+
+struct acpi_tpr_serialize_request {
+ u64 sr_register;
+ /*
+ * BIT 1 - Status of serialization request (RO)
+ * 0 == register idle, 1 == serialization in progress
+ * BIT 2 - Control field to initiate serialization (RW)
+ *	0 == normal, 1 == initiate serialization
+ *	(self-clearing, to allow multiple serialization requests)
+ */
+};
+
+/*******************************************************************************
+ *
* ECDT - Embedded Controller Boot Resources Table
* Version 1
*
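
As a short illustration of the TPRn_BASE bit handlers added in this hunk, here is a hedged sketch (not part of this patch; the helper and its pr_info reporting are hypothetical) of decoding one raw TPRn_BASE register value:

#include <linux/acpi.h>		/* pulls in acpi/actbl1.h */
#include <linux/printk.h>

static void tprn_base_report(u64 reg)
{
	/* Bits 63:20 hold the base in 1MB units, per the comment above. */
	u64 base = GET_TPRN_BASE_ADDR(reg) << ACPI_TPRN_BASE_ADDR_SHIFT;
	/* Enable bit: 0 == range enabled, 1 == range disabled. */
	bool enabled = GET_TPRN_BASE_ENABLE(reg) == 0;
	/* RW bit: 0 == RW register, 1 == RO register. */
	bool rw = GET_TPRN_BASE_RW(reg) == 0;

	pr_info("TPR base %#llx, %s, %s\n", base,
		enabled ? "enabled" : "disabled", rw ? "RW" : "RO");
}

The TPRn_LIMIT handlers further down follow the same pattern, minus the Enable bit.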
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index f726bce3eb84..5c0b55e7b3e4 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -31,7 +31,9 @@
#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
#define ACPI_SIG_ERDT "ERDT" /* Enhanced Resource Director Technology */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
+#define ACPI_SIG_IOVT "IOVT" /* I/O Virtualization Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
+#define ACPI_SIG_KEYP "KEYP" /* Key Programming Interface for IDE */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
@@ -680,6 +682,7 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_SMMU_V3 = 0x04,
ACPI_IORT_NODE_PMCG = 0x05,
ACPI_IORT_NODE_RMR = 0x06,
+ ACPI_IORT_NODE_IWB = 0x07,
};
struct acpi_iort_id_mapping {
@@ -858,6 +861,79 @@ struct acpi_iort_rmr_desc {
u32 reserved;
};
+struct acpi_iort_iwb {
+ u64 base_address;
+ u16 iwb_index; /* Unique IWB identifier matching with the IWB GSI namespace. */
+ char device_name[]; /* Path of the IWB namespace object */
+};
+
+/*******************************************************************************
+ *
+ * IOVT - I/O Virtualization Table
+ *
+ * Conforms to "LoongArch I/O Virtualization Table",
+ * Version 0.1, October 2024
+ *
+ ******************************************************************************/
+
+struct acpi_table_iovt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 iommu_count;
+ u16 iommu_offset;
+ u8 reserved[8];
+};
+
+/* IOVT subtable header */
+
+struct acpi_iovt_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_iovt_iommu_type {
+ ACPI_IOVT_IOMMU_V1 = 0x00,
+ ACPI_IOVT_IOMMU_RESERVED = 0x01 /* 1 and greater are reserved */
+};
+
+/* IOVT subtables */
+
+struct acpi_iovt_iommu {
+ struct acpi_iovt_header header;
+ u32 flags;
+ u16 segment;
+ u16 phy_width; /* Physical Address Width */
+ u16 virt_width; /* Virtual Address Width */
+ u16 max_page_level;
+ u64 page_size;
+ u32 device_id;
+ u64 base_address;
+ u32 address_space_size;
+ u8 interrupt_type;
+ u8 reserved[3];
+ u32 gsi_number;
+ u32 proximity_domain;
+ u32 max_device_num;
+ u32 device_entry_num;
+ u32 device_entry_offset;
+};
+
+struct acpi_iovt_device_entry {
+ u8 type;
+ u8 length;
+ u8 flags;
+ u8 reserved[3];
+ u16 device_id;
+};
+
+enum acpi_iovt_device_entry_type {
+ ACPI_IOVT_DEVICE_ENTRY_SINGLE = 0x00,
+ ACPI_IOVT_DEVICE_ENTRY_START = 0x01,
+ ACPI_IOVT_DEVICE_ENTRY_END = 0x02,
+ ACPI_IOVT_DEVICE_ENTRY_RESERVED = 0x03 /* 3 and greater are reserved */
+};
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -1067,6 +1143,64 @@ struct acpi_ivrs_memory {
/*******************************************************************************
*
+ * KEYP - Key Programming Interface for Root Complex Integrity and Data
+ * Encryption (IDE)
+ * Version 1
+ *
+ * Conforms to "Key Programming Interface for Root Complex Integrity and Data
+ * Encryption (IDE)" document. See under ACPI-Related Documents.
+ *
+ ******************************************************************************/
+struct acpi_table_keyp {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+};
+
+/* KEYP common subtable header */
+
+struct acpi_keyp_common_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_keyp_type {
+ ACPI_KEYP_TYPE_CONFIG_UNIT = 0,
+};
+
+/* Root Port Information Structure */
+
+struct acpi_keyp_rp_info {
+ u16 segment;
+ u8 bus;
+ u8 devfn;
+};
+
+/* Key Configuration Unit Structure */
+
+struct acpi_keyp_config_unit {
+ struct acpi_keyp_common_header header;
+ u8 protocol_type;
+ u8 version;
+ u8 root_port_count;
+ u8 flags;
+ u64 register_base_address;
+ struct acpi_keyp_rp_info rp_info[];
+};
+
+enum acpi_keyp_protocol_type {
+ ACPI_KEYP_PROTO_TYPE_INVALID = 0,
+ ACPI_KEYP_PROTO_TYPE_PCIE,
+ ACPI_KEYP_PROTO_TYPE_CXL,
+ ACPI_KEYP_PROTO_TYPE_RESERVED
+};
+
+#define ACPI_KEYP_F_TVM_USABLE (1)
+
+/*******************************************************************************
+ *
* LPIT - Low Power Idle Table
*
* Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
@@ -1167,7 +1301,10 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_IMSIC = 25,
ACPI_MADT_TYPE_APLIC = 26,
ACPI_MADT_TYPE_PLIC = 27,
- ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
+ ACPI_MADT_TYPE_GICV5_IRS = 28,
+ ACPI_MADT_TYPE_GICV5_ITS = 29,
+ ACPI_MADT_TYPE_GICV5_ITS_TRANSLATE = 30,
+ ACPI_MADT_TYPE_RESERVED = 31, /* 31 to 0x7F are reserved */
ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
@@ -1289,7 +1426,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 changes) */
+/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 + ACPI 6.7 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -1310,6 +1447,8 @@ struct acpi_madt_generic_interrupt {
u8 reserved2[1];
u16 spe_interrupt; /* ACPI 6.3 */
u16 trbe_interrupt; /* ACPI 6.5 */
+ u16 iaffid; /* ACPI 6.7 */
+ u32 irs_id;
};
/* Masks for Flags field above */
@@ -1332,7 +1471,7 @@ struct acpi_madt_generic_distributor {
u8 reserved2[3]; /* reserved - must be zero */
};
-/* Values for Version field above */
+/* Values for Version field above and Version field in acpi_madt_gicv5_irs */
enum acpi_madt_gic_version {
ACPI_MADT_GIC_VERSION_NONE = 0,
@@ -1340,7 +1479,8 @@ enum acpi_madt_gic_version {
ACPI_MADT_GIC_VERSION_V2 = 2,
ACPI_MADT_GIC_VERSION_V3 = 3,
ACPI_MADT_GIC_VERSION_V4 = 4,
- ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_MADT_GIC_VERSION_V5 = 5,
+ ACPI_MADT_GIC_VERSION_RESERVED = 6 /* 6 and greater are reserved */
};
/* 13: Generic MSI Frame (ACPI 5.1) */
@@ -1611,6 +1751,41 @@ struct acpi_madt_plic {
u32 gsi_base;
};
+/* 28: Arm GICv5 IRS (ACPI 6.7) */
+struct acpi_madt_gicv5_irs {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 irs_id;
+ u32 flags;
+ u32 reserved2;
+ u64 config_base_address;
+ u64 setlpi_base_address;
+};
+
+#define ACPI_MADT_IRS_NON_COHERENT (1)
+
+/* 29: Arm GICv5 ITS Config Frame (ACPI 6.7) */
+struct acpi_madt_gicv5_translator {
+ struct acpi_subtable_header header;
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
+ u32 translator_id;
+ u64 base_address;
+};
+
+#define ACPI_MADT_GICV5_ITS_NON_COHERENT (1)
+
+/* 30: Arm GICv5 ITS Translate Frame (ACPI 6.7) */
+struct acpi_madt_gicv5_translate_frame {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 linked_translator_id;
+ u32 translate_frame_id;
+ u32 reserved2;
+ u64 base_address;
+};
+
/* 80: OEM data */
struct acpi_madt_oem_data {
@@ -2826,6 +3001,15 @@ struct acpi_pptt_cache {
/* 1: Cache Type Structure for PPTT version 3 */
struct acpi_pptt_cache_v1 {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
u32 cache_id;
};
@@ -3065,6 +3249,8 @@ struct acpi_ras2_patrol_scrub_param {
u32 flags;
u32 scrub_params_out;
u32 scrub_params_in;
+ u32 ext_scrub_params;
+ u8 scrub_rate_desc[256];
};
/* Masks for Flags field above */
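
For reference, a hedged sketch (not part of this patch) of walking the flexible rp_info[] array in a Key Configuration Unit structure introduced above; the caller is hypothetical and "unit" is assumed to point into a mapped KEYP table:

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/printk.h>

static void keyp_list_root_ports(const struct acpi_keyp_config_unit *unit)
{
	u8 i;

	for (i = 0; i < unit->root_port_count; i++) {
		const struct acpi_keyp_rp_info *rp = &unit->rp_info[i];

		pr_info("KEYP root port %04x:%02x:%02x.%u (proto %u)\n",
			rp->segment, rp->bus, PCI_SLOT(rp->devfn),
			PCI_FUNC(rp->devfn), unit->protocol_type);
	}
}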
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 79d3aa5a4bad..7ca456e88377 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -238,6 +238,7 @@ struct acpi_srat_mem_affinity {
#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
+#define ACPI_SRAT_MEM_SPEC_PURPOSE (1<<3) /* 03: Memory is intended for specific-purpose usage */
/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 25dd3e998727..b2e29da6ba0a 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -37,6 +37,11 @@
#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+/* TPM */
+#define UUID_HARDWARE_INFORMATION "cf8e16a5-c1e8-4e25-b712-4f54a96702c8"
+#define UUID_START_METHOD "6bbf6cab-5463-4714-b7cd-f0203c0368d4"
+#define UUID_MEMORY_CLEAR "376054ed-cc13-4675-901c-4756d7f2d45d"
+
/* NVDIMM - NFIT table */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
@@ -71,4 +76,5 @@
#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
#define UUID_1ST_FUNCTION_ID "893f00a6-660c-494e-bcfd-3043f4fb67c0"
#define UUID_2ND_FUNCTION_ID "107ededd-d381-4fd7-8da9-08e9a6c79644"
+#define UUID_FAN_TRIP_POINTS "a7611840-99fe-41ae-a488-35c75926c8eb"
#endif /* __ACUUID_H__ */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 13fa81504844..4d644f03098e 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -39,7 +39,8 @@
/* CPPC_AUTO_ACT_WINDOW_MAX_SIG is 127, so 128 and 129 will decay to 127 when writing */
#define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH 129
-#define CPPC_ENERGY_PERF_MAX (0xFF)
+#define CPPC_EPP_PERFORMANCE_PREF 0x00
+#define CPPC_EPP_ENERGY_EFFICIENCY_PREF 0xFF
/* Each register has the folowing format. */
struct cpc_reg {
@@ -119,8 +120,6 @@ struct cppc_perf_caps {
u32 lowest_nonlinear_perf;
u32 lowest_freq;
u32 nominal_freq;
- u32 energy_perf;
- bool auto_sel;
};
struct cppc_perf_ctrls {
@@ -128,6 +127,7 @@ struct cppc_perf_ctrls {
u32 min_perf;
u32 desired_perf;
u32 energy_perf;
+ bool auto_sel;
};
struct cppc_perf_fb_ctrs {
@@ -154,6 +154,7 @@ extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu);
extern bool cppc_perf_ctrs_in_pcc(void);
extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
@@ -204,6 +205,10 @@ static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
return -EOPNOTSUPP;
}
+static inline bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
+{
+ return false;
+}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
return false;
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index ebd21b05fe6e..7bea522c0657 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -21,6 +21,7 @@ struct ghes {
struct acpi_hest_generic_v2 *generic_v2;
};
struct acpi_hest_generic_status *estatus;
+ unsigned int estatus_length;
unsigned long flags;
union {
struct list_head list;
@@ -29,6 +30,7 @@ struct ghes {
};
struct device *dev;
struct list_head elist;
+ void __iomem *error_status_vaddr;
};
struct ghes_estatus_node {
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index d0eccbd920e5..7146a8e9e9c2 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -417,32 +417,15 @@ static inline void acpi_processor_throttling_init(void) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
/* in processor_idle.c */
-extern struct cpuidle_driver acpi_idle_driver;
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-int acpi_processor_power_init(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr);
+void acpi_processor_power_init(struct acpi_processor *pr);
+void acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-#else
-static inline int acpi_processor_power_init(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_exit(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_hotplug(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
+void acpi_processor_register_idle_driver(void);
+void acpi_processor_unregister_idle_driver(void);
+int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
@@ -465,11 +448,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
}
#endif /* CONFIG_CPU_FREQ */
-#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
-#endif
-
void acpi_processor_init_invariance_cppc(void);
#endif
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 295c94a3ccc1..9aff61e7b8f2 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -32,6 +32,7 @@ mandatory-y += irq_work.h
mandatory-y += kdebug.h
mandatory-y += kmap_size.h
mandatory-y += kprobes.h
+mandatory-y += kvm_types.h
mandatory-y += linkage.h
mandatory-y += local.h
mandatory-y += local64.h
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index cc840537885f..ddd90bbe40df 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -26,6 +26,9 @@ __NR_fremovexattr,
__NR_fchownat,
__NR_fchmodat,
#endif
+#ifdef __NR_fchmodat2
+__NR_fchmodat2,
+#endif
#ifdef __NR_chown32
__NR_chown32,
__NR_fchown32,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 7bb7b5a83ae2..fb9991f53fb6 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -4,9 +4,15 @@ __NR_readlink,
#endif
__NR_quotactl,
__NR_listxattr,
+#ifdef __NR_listxattrat
+__NR_listxattrat,
+#endif
__NR_llistxattr,
__NR_flistxattr,
__NR_getxattr,
+#ifdef __NR_getxattrat
+__NR_getxattrat,
+#endif
__NR_lgetxattr,
__NR_fgetxattr,
#ifdef __NR_readlinkat
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 0f2dcbbfee2f..5c5cf2f7fc39 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -191,7 +191,7 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock)
#else
-#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+#define res_spin_lock(lock) ({ grab_held_lock_entry(lock); resilient_tas_spin_lock(lock); })
#endif /* CONFIG_QUEUED_SPINLOCKS */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 1fff717cae51..4d679d2a206b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -46,7 +46,8 @@
*
* The mmu_gather API consists of:
*
- * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() /
+ * tlb_finish_mmu()
*
* start and finish a mmu_gather
*
@@ -364,6 +365,20 @@ struct mmu_gather {
unsigned int vma_huge : 1;
unsigned int vma_pfn : 1;
+ /*
+ * Did we unshare (unmap) any shared page tables? For now only
+ * used for hugetlb PMD table sharing.
+ */
+ unsigned int unshared_tables : 1;
+
+ /*
+ * Did we unshare any page tables such that they are now exclusive
+ * and could get reused+modified by the new owner? When setting this
+ * flag, "unshared_tables" will be set as well. For now only used
+ * for hugetlb PMD table sharing.
+ */
+ unsigned int fully_unshared_tables : 1;
+
unsigned int batch_count;
#ifndef CONFIG_MMU_GATHER_NO_GATHER
@@ -400,6 +415,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
+ tlb->unshared_tables = 0;
/*
* Do not reset mmu_gather::vma_* fields here, we do not
* call into tlb_start_vma() again to set them if there is an
@@ -484,7 +500,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
* these bits.
*/
if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
- tlb->cleared_puds || tlb->cleared_p4ds))
+ tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables))
return;
tlb_flush(tlb);
@@ -773,6 +789,63 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
}
#endif
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt,
+ unsigned long addr)
+{
+ /*
+ * The caller must make sure that concurrent unsharing + exclusive
+ * reuse is impossible until tlb_flush_unshared_tables() was called.
+ */
+ VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt));
+ ptdesc_pmd_pts_dec(pt);
+
+ /* Clearing a PUD pointing at a PMD table with PMD leaves. */
+ tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);
+
+ /*
+ * If the page table is now exclusively owned, we fully unshared
+ * a page table.
+ */
+ if (!ptdesc_pmd_is_shared(pt))
+ tlb->fully_unshared_tables = true;
+ tlb->unshared_tables = true;
+}
+
+static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
+{
+ /*
+ * As soon as the caller drops locks to allow for reuse of
+ * previously-shared tables, these tables could get modified and
+ * even reused outside of hugetlb context, so we have to make sure that
+ * any page table walkers (incl. TLB, GUP-fast) are aware of that
+ * change.
+ *
+ * Even if we are not fully unsharing a PMD table, we must
+ * flush the TLB for the unsharer now.
+ */
+ if (tlb->unshared_tables)
+ tlb_flush_mmu_tlbonly(tlb);
+
+ /*
+ * Similarly, we must make sure that concurrent GUP-fast will not
+ * walk previously-shared page tables that are getting modified+reused
+ * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
+ *
+ * We only perform this when we are the last sharer of a page table,
+	 * as the IPI will reach all CPUs: any concurrent GUP-fast (which
+	 * runs with interrupts disabled) will have finished by the time
+	 * the IPIs have been handled.
+ *
+ * Note that on configs where tlb_remove_table_sync_one() is a NOP,
+ * the expectation is that the tlb_flush_mmu_tlbonly() would have issued
+ * required IPIs already for us.
+ */
+ if (tlb->fully_unshared_tables) {
+ tlb_remove_table_sync_one();
+ tlb->fully_unshared_tables = false;
+ }
+}
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
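
A rough sketch of the intended calling pattern for the two new hugetlb-unsharing helpers above (the caller, its locking, and the loop are hypothetical; only the ordering is taken from the comments in this hunk):

#include <asm/tlb.h>

static void unshare_pmd_tables(struct mmu_gather *tlb, struct ptdesc *pt,
			       unsigned long addr)
{
	/*
	 * The caller holds the locks that prevent the previously-shared
	 * table from being reused while it is being unshared.
	 */
	tlb_unshare_pmd_ptdesc(tlb, pt, addr);

	/* ... possibly unshare further tables under the same locks ... */

	/*
	 * Before those locks are dropped, flush so that stale TLB entries
	 * and concurrent GUP-fast cannot see a table that may now be
	 * reused and modified by its new exclusive owner.
	 */
	tlb_flush_unshared_tables(tlb);
}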
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 4dbe715be65b..9865ba48c5b1 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,11 +45,7 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NUMA
- #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
- #else
- #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
- #endif
+#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
@@ -61,7 +57,7 @@
cpumask_of_node(pcibus_to_node(bus)))
#endif
-#endif /* CONFIG_NUMA */
+#endif /* !CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8ca130af301f..eeb070f330bd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -972,7 +972,8 @@
#define RUNTIME_CONST_VARIABLES \
RUNTIME_CONST(shift, d_hash_shift) \
RUNTIME_CONST(ptr, dentry_hashtable) \
- RUNTIME_CONST(ptr, __dentry_cache)
+ RUNTIME_CONST(ptr, __dentry_cache) \
+ RUNTIME_CONST(ptr, __names_cache)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 9339da7c20a8..cbf1cc96db52 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -19,6 +19,103 @@
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
/*
+ * The POWER8 VSX optimized AES assembly code is borrowed from OpenSSL and
+ * inherits OpenSSL's AES_KEY format, which stores the number of rounds after
+ * the round keys. That assembly code is difficult to change. So for
+ * compatibility purposes we reserve space for the extra nrounds field on PPC64.
+ *
+ * Note: when prepared for decryption, the round keys are just the reversed
+ * standard round keys, not the round keys for the Equivalent Inverse Cipher.
+ */
+struct p8_aes_key {
+ u32 rndkeys[AES_MAX_KEYLENGTH_U32];
+ int nrounds;
+};
+
+union aes_enckey_arch {
+ u32 rndkeys[AES_MAX_KEYLENGTH_U32];
+#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
+#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
+ /* Used unconditionally (when SPE AES code is enabled in kconfig) */
+ u32 spe_enc_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
+#elif defined(CONFIG_PPC)
+ /*
+ * Kernels that include the POWER8 VSX optimized AES code use this field
+ * when that code is usable at key preparation time. Otherwise they
+ * fall back to rndkeys. In the latter case, p8.nrounds (which doesn't
+ * overlap rndkeys) is set to 0 to differentiate the two formats.
+ */
+ struct p8_aes_key p8;
+#elif defined(CONFIG_S390)
+ /* Used when the CPU supports CPACF AES for this key's length */
+ u8 raw_key[AES_MAX_KEY_SIZE];
+#elif defined(CONFIG_SPARC64)
+ /* Used when the CPU supports the SPARC64 AES opcodes */
+ u64 sparc_rndkeys[AES_MAX_KEYLENGTH / sizeof(u64)];
+#endif
+#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
+};
+
+union aes_invkey_arch {
+ u32 inv_rndkeys[AES_MAX_KEYLENGTH_U32];
+#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
+#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
+ /* Used unconditionally (when SPE AES code is enabled in kconfig) */
+ u32 spe_dec_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
+#elif defined(CONFIG_PPC)
+ /* Used conditionally, analogous to aes_enckey_arch::p8 */
+ struct p8_aes_key p8;
+#endif
+#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
+};
+
+/**
+ * struct aes_enckey - An AES key prepared for encryption
+ * @len: Key length in bytes: 16 for AES-128, 24 for AES-192, 32 for AES-256.
+ * @nrounds: Number of rounds: 10 for AES-128, 12 for AES-192, 14 for AES-256.
+ * This is '6 + @len / 4' and is cached so that AES implementations
+ * that need it don't have to recompute it for each en/decryption.
+ * @padding: Padding to make offsetof(@k) be a multiple of 16, so that aligning
+ * this struct to a 16-byte boundary results in @k also being 16-byte
+ * aligned. Users aren't required to align this struct to 16 bytes,
+ * but it may slightly improve performance.
+ * @k: This typically contains the AES round keys as an array of '@nrounds + 1'
+ * groups of four u32 words. However, architecture-specific implementations
+ * of AES may store something else here, e.g. just the raw key if it's all
+ * they need.
+ *
+ * Note that this struct is about half the size of struct aes_key. This is
+ * separate from struct aes_key so that modes that need only AES encryption
+ * (e.g. AES-GCM, AES-CTR, AES-CMAC, tweak key in AES-XTS) don't incur the time
+ * and space overhead of computing and caching the decryption round keys.
+ *
+ * Note that there's no decryption-only equivalent (i.e. "struct aes_deckey"),
+ * since (a) it's rare that modes need decryption-only, and (b) some AES
+ * implementations use the same @k for both encryption and decryption, either
+ * always or conditionally; in the latter case both @k and @inv_k are needed.
+ */
+struct aes_enckey {
+ u32 len;
+ u32 nrounds;
+ u32 padding[2];
+ union aes_enckey_arch k;
+};
+
+/**
+ * struct aes_key - An AES key prepared for encryption and decryption
+ * @aes_enckey: Common fields and the key prepared for encryption
+ * @inv_k: This generally contains the round keys for the AES Equivalent
+ * Inverse Cipher, as an array of '@nrounds + 1' groups of four u32
+ * words. However, architecture-specific implementations of AES may
+ * store something else here. For example, they may leave this field
+ * uninitialized if they use @k for both encryption and decryption.
+ */
+struct aes_key {
+ struct aes_enckey; /* Include all fields of aes_enckey. */
+ union aes_invkey_arch inv_k;
+};
+
+/*
* Please ensure that the first two fields are 16-byte aligned
* relative to the start of the structure, i.e., don't move them!
*/
@@ -28,13 +125,10 @@ struct crypto_aes_ctx {
u32 key_length;
};
-extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
-extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
-
/*
* validate key length for AES algorithms
*/
-static inline int aes_check_keylen(unsigned int keylen)
+static inline int aes_check_keylen(size_t keylen)
{
switch (keylen) {
case AES_KEYSIZE_128:
@@ -48,9 +142,6 @@ static inline int aes_check_keylen(unsigned int keylen)
return 0;
}
-int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
@@ -68,28 +159,177 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
+/*
+ * The following functions are temporarily exported for use by the AES mode
+ * implementations in arch/$(SRCARCH)/crypto/. These exports will go away when
+ * that code is migrated into lib/crypto/.
+ */
+#ifdef CONFIG_ARM64
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+#elif defined(CONFIG_PPC)
+void ppc_expand_key_128(u32 *key_enc, const u8 *key);
+void ppc_expand_key_192(u32 *key_enc, const u8 *key);
+void ppc_expand_key_256(u32 *key_enc, const u8 *key);
+void ppc_generate_decrypt_key(u32 *key_dec, u32 *key_enc, unsigned int key_len);
+void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes);
+void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
+ u32 bytes);
+void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_crypt_ctr(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv, u32 *key_twk);
+void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
+ u8 *iv, u32 *key_twk);
+int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+ struct p8_aes_key *key);
+int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
+ struct p8_aes_key *key);
+void aes_p8_encrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
+void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key, u8 *iv, const int enc);
+void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key, const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key1,
+ const struct p8_aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key1,
+ const struct p8_aes_key *key2, u8 *iv);
+#elif defined(CONFIG_SPARC64)
+void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
+ unsigned int key_len);
+void aes_sparc64_load_encrypt_keys_128(const u64 *key);
+void aes_sparc64_load_encrypt_keys_192(const u64 *key);
+void aes_sparc64_load_encrypt_keys_256(const u64 *key);
+void aes_sparc64_load_decrypt_keys_128(const u64 *key);
+void aes_sparc64_load_decrypt_keys_192(const u64 *key);
+void aes_sparc64_load_decrypt_keys_256(const u64 *key);
+void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+#endif
+
/**
- * aes_encrypt - Encrypt a single AES block
- * @ctx: Context struct containing the key schedule
- * @out: Buffer to store the ciphertext
- * @in: Buffer containing the plaintext
+ * aes_preparekey() - Prepare an AES key for encryption and decryption
+ * @key: (output) The key structure to initialize
+ * @in_key: The raw AES key
+ * @key_len: Length of the raw key in bytes. Should be either AES_KEYSIZE_128,
+ * AES_KEYSIZE_192, or AES_KEYSIZE_256.
+ *
+ * This prepares an AES key for both the encryption and decryption directions of
+ * the block cipher. Typically this involves expanding the raw key into both
+ * the standard round keys and the Equivalent Inverse Cipher round keys, but
+ * some architecture-specific implementations don't do the full expansion here.
+ *
+ * The caller is responsible for zeroizing both the struct aes_key and the raw
+ * key once they are no longer needed.
+ *
+ * If you don't need decryption support, use aes_prepareenckey() instead.
+ *
+ * Return: 0 on success or -EINVAL if the given key length is invalid. No other
+ * errors are possible, so callers that always pass a valid key length
+ * don't need to check for errors.
+ *
+ * Context: Any context.
*/
-void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+int aes_preparekey(struct aes_key *key, const u8 *in_key, size_t key_len);
/**
- * aes_decrypt - Decrypt a single AES block
- * @ctx: Context struct containing the key schedule
- * @out: Buffer to store the plaintext
- * @in: Buffer containing the ciphertext
+ * aes_prepareenckey() - Prepare an AES key for encryption-only
+ * @key: (output) The key structure to initialize
+ * @in_key: The raw AES key
+ * @key_len: Length of the raw key in bytes. Should be either AES_KEYSIZE_128,
+ * AES_KEYSIZE_192, or AES_KEYSIZE_256.
+ *
+ * This prepares an AES key for only the encryption direction of the block
+ * cipher. Typically this involves expanding the raw key into only the standard
+ * round keys, resulting in a struct about half the size of struct aes_key.
+ *
+ * The caller is responsible for zeroizing both the struct aes_enckey and the
+ * raw key once they are no longer needed.
+ *
+ * Note that while the resulting prepared key supports only AES encryption, it
+ * can still be used for decrypting in a mode of operation that uses AES in only
+ * the encryption (forward) direction, for example counter mode.
+ *
+ * Return: 0 on success or -EINVAL if the given key length is invalid. No other
+ * errors are possible, so callers that always pass a valid key length
+ * don't need to check for errors.
+ *
+ * Context: Any context.
+ */
+int aes_prepareenckey(struct aes_enckey *key, const u8 *in_key, size_t key_len);
+
+typedef union {
+ const struct aes_enckey *enc_key;
+ const struct aes_key *full_key;
+} aes_encrypt_arg __attribute__ ((__transparent_union__));
+
+/**
+ * aes_encrypt() - Encrypt a single AES block
+ * @key: The AES key, as a pointer to either an encryption-only key
+ * (struct aes_enckey) or a full, bidirectional key (struct aes_key).
+ * @out: Buffer to store the ciphertext block
+ * @in: Buffer containing the plaintext block
+ *
+ * Context: Any context.
+ */
+void aes_encrypt(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
+
+/**
+ * aes_decrypt() - Decrypt a single AES block
+ * @key: The AES key, previously initialized by aes_preparekey()
+ * @out: Buffer to store the plaintext block
+ * @in: Buffer containing the ciphertext block
+ *
+ * Context: Any context.
*/
-void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+void aes_decrypt(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+extern const u32 aes_enc_tab[256];
+extern const u32 aes_dec_tab[256];
-void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+void aescfb_encrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
-void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+void aescfb_decrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
#endif
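
As a usage illustration of the prepared-key API declared above, a hedged sketch (not part of this patch; the round-trip helper is hypothetical):

#include <crypto/aes.h>
#include <linux/errno.h>
#include <linux/string.h>

static int aes_roundtrip_one_block(const u8 *raw_key, size_t key_len,
				   const u8 in[AES_BLOCK_SIZE])
{
	struct aes_key key;
	u8 ct[AES_BLOCK_SIZE], pt[AES_BLOCK_SIZE];
	int err;

	err = aes_preparekey(&key, raw_key, key_len);
	if (err)
		return err;	/* only -EINVAL, for a bad key length */

	/* struct aes_key * is accepted through the transparent union. */
	aes_encrypt(&key, ct, in);
	aes_decrypt(&key, pt, ct);

	err = memcmp(pt, in, AES_BLOCK_SIZE) ? -EIO : 0;
	memzero_explicit(&key, sizeof(key));
	return err;
}

Callers that only ever run the forward direction (e.g. CTR or CFB mode) would instead use aes_prepareenckey() with the smaller struct aes_enckey.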
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
index 6b25305fe611..cb5d6fe15d40 100644
--- a/include/crypto/df_sp80090a.h
+++ b/include/crypto/df_sp80090a.h
@@ -18,7 +18,7 @@ static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
statelen + blocklen; /* temp */
}
-int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+int crypto_drbg_ctr_df(struct aes_enckey *aes,
unsigned char *df_data,
size_t bytes_to_return,
struct list_head *seedlist,
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index fd9df607a836..b524e47bd4d0 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -66,7 +66,7 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
struct aesgcm_ctx {
be128 ghash_key;
- struct crypto_aes_ctx aes_ctx;
+ struct aes_enckey aes_key;
unsigned int authsize;
};
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 2d97440028ff..9a3f28baa804 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -191,11 +191,12 @@ static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
-struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
- struct crypto_acomp_streams *s) __acquires(stream);
+#define crypto_acomp_lock_stream_bh(...) __acquire_ret(_crypto_acomp_lock_stream_bh(__VA_ARGS__), &__ret->lock);
+struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires_ret;
static inline void crypto_acomp_unlock_stream_bh(
- struct crypto_acomp_stream *stream) __releases(stream)
+ struct crypto_acomp_stream *stream) __releases(&stream->lock)
{
spin_unlock_bh(&stream->lock);
}
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index f19ef376833f..6a1d27880615 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -45,7 +45,7 @@ struct crypto_engine {
struct list_head list;
spinlock_t queue_lock;
- struct crypto_queue queue;
+ struct crypto_queue queue __guarded_by(&queue_lock);
struct device *dev;
struct kthread_worker *kworker;
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 0cad8e7364c8..a965b6aabf61 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -242,6 +242,13 @@ static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
return crypto_tfm_ctx_dma(&tfm->base);
}
+static inline bool crypto_skcipher_tested(struct crypto_skcipher *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_skcipher_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
return req->__ctx;
diff --git a/include/crypto/mldsa.h b/include/crypto/mldsa.h
new file mode 100644
index 000000000000..3ef2676787c9
--- /dev/null
+++ b/include/crypto/mldsa.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Support for verifying ML-DSA signatures
+ *
+ * Copyright 2025 Google LLC
+ */
+#ifndef _CRYPTO_MLDSA_H
+#define _CRYPTO_MLDSA_H
+
+#include <linux/types.h>
+
+/* Identifier for an ML-DSA parameter set */
+enum mldsa_alg {
+ MLDSA44, /* ML-DSA-44 */
+ MLDSA65, /* ML-DSA-65 */
+ MLDSA87, /* ML-DSA-87 */
+};
+
+/* Lengths of ML-DSA public keys and signatures in bytes */
+#define MLDSA44_PUBLIC_KEY_SIZE 1312
+#define MLDSA65_PUBLIC_KEY_SIZE 1952
+#define MLDSA87_PUBLIC_KEY_SIZE 2592
+#define MLDSA44_SIGNATURE_SIZE 2420
+#define MLDSA65_SIGNATURE_SIZE 3309
+#define MLDSA87_SIGNATURE_SIZE 4627
+
+/**
+ * mldsa_verify() - Verify an ML-DSA signature
+ * @alg: The ML-DSA parameter set to use
+ * @sig: The signature
+ * @sig_len: Length of the signature in bytes. Should match the
+ * MLDSA*_SIGNATURE_SIZE constant associated with @alg,
+ * otherwise -EBADMSG will be returned.
+ * @msg: The message
+ * @msg_len: Length of the message in bytes
+ * @pk: The public key
+ * @pk_len: Length of the public key in bytes. Should match the
+ * MLDSA*_PUBLIC_KEY_SIZE constant associated with @alg,
+ * otherwise -EBADMSG will be returned.
+ *
+ * This verifies a signature using pure ML-DSA with the specified parameter set.
+ * The context string is assumed to be empty. This corresponds to FIPS 204
+ * Algorithm 3 "ML-DSA.Verify" with the ctx parameter set to the empty string
+ * and the lengths of the signature and key given explicitly by the caller.
+ *
+ * Context: Might sleep
+ *
+ * Return:
+ * * 0 if the signature is valid
+ * * -EBADMSG if the signature and/or public key is malformed
+ * * -EKEYREJECTED if the signature is invalid but otherwise well-formed
+ * * -ENOMEM if out of memory so the validity of the signature is unknown
+ */
+int mldsa_verify(enum mldsa_alg alg, const u8 *sig, size_t sig_len,
+ const u8 *msg, size_t msg_len, const u8 *pk, size_t pk_len);
+
+#if IS_ENABLED(CONFIG_CRYPTO_LIB_MLDSA_KUNIT_TEST)
+/* Internal function, exposed only for unit testing */
+s32 mldsa_use_hint(u8 h, s32 r, s32 gamma2);
+#endif
+
+#endif /* _CRYPTO_MLDSA_H */
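
A minimal usage sketch built only from the declarations above; the calling function and its error reporting are hypothetical:

static int example_check_mldsa65(const u8 *msg, size_t msg_len,
				 const u8 sig[MLDSA65_SIGNATURE_SIZE],
				 const u8 pk[MLDSA65_PUBLIC_KEY_SIZE])
{
	int err;

	err = mldsa_verify(MLDSA65, sig, MLDSA65_SIGNATURE_SIZE,
			   msg, msg_len, pk, MLDSA65_PUBLIC_KEY_SIZE);
	if (err == -EKEYREJECTED)
		pr_warn("well-formed but invalid ML-DSA-65 signature\n");

	return err;	/* 0 if the signature verified, negative errno otherwise */
}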
diff --git a/include/crypto/nh.h b/include/crypto/nh.h
new file mode 100644
index 000000000000..465e85bf203f
--- /dev/null
+++ b/include/crypto/nh.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH hash function for Adiantum
+ */
+
+#ifndef _CRYPTO_NH_H
+#define _CRYPTO_NH_H
+
+#include <linux/types.h>
+
+/* NH parameterization: */
+
+/* Endianness: little */
+/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
+
+/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
+#define NH_PAIR_STRIDE 2
+#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
+
+/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
+#define NH_NUM_PASSES 4
+#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
+
+/* Max message size: 1024 bytes (32x compression factor) */
+#define NH_NUM_STRIDES 64
+#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
+#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
+#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
+ NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
+#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
+
+/**
+ * nh() - NH hash function for Adiantum
+ * @key: The key. @message_len + 48 bytes of it are used. This is NH_KEY_BYTES
+ * if @message_len has its maximum length of NH_MESSAGE_BYTES.
+ * @message: The message
+ * @message_len: The message length in bytes. Must be a multiple of 16
+ * (NH_MESSAGE_UNIT) and at most 1024 (NH_MESSAGE_BYTES).
+ * @hash: (output) The resulting hash value
+ *
+ * Note: the pseudocode for NH in the Adiantum paper iterates over 1024-byte
+ * segments of the message, computes a 32-byte hash for each, and returns all
+ * the hashes concatenated together. In contrast, this function just hashes one
+ * segment and returns one hash. It's the caller's responsibility to call this
+ * function for each 1024-byte segment and collect all the hashes.
+ *
+ * Context: Any context.
+ */
+void nh(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES]);
+
+#endif /* _CRYPTO_NH_H */
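
A sketch of the per-segment loop the note above asks callers to implement, assuming the total length is a multiple of NH_MESSAGE_UNIT and that the per-segment hashes are simply stored back to back:

static void example_nh_segments(const u32 *key, const u8 *msg, size_t len,
				__le64 *hashes)
{
	while (len) {
		/* Hash at most one 1024-byte NH message at a time. */
		size_t seg = min_t(size_t, len, NH_MESSAGE_BYTES);

		nh(key, msg, seg, hashes);
		hashes += NH_NUM_PASSES;	/* 32 bytes of output per segment */
		msg += seg;
		len -= seg;
	}
}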
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
deleted file mode 100644
index 306925fea190..000000000000
--- a/include/crypto/nhpoly1305.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values and helper functions for the NHPoly1305 hash function.
- */
-
-#ifndef _NHPOLY1305_H
-#define _NHPOLY1305_H
-
-#include <crypto/hash.h>
-#include <crypto/internal/poly1305.h>
-
-/* NH parameterization: */
-
-/* Endianness: little */
-/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
-
-/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
-#define NH_PAIR_STRIDE 2
-#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
-
-/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
-#define NH_NUM_PASSES 4
-#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
-
-/* Max message size: 1024 bytes (32x compression factor) */
-#define NH_NUM_STRIDES 64
-#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
-#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
-#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
- NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
-#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
-
-#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
-
-struct nhpoly1305_key {
- struct poly1305_core_key poly_key;
- u32 nh_key[NH_KEY_WORDS];
-};
-
-struct nhpoly1305_state {
-
- /* Running total of polynomial evaluation */
- struct poly1305_state poly_state;
-
- /* Partial block buffer */
- u8 buffer[NH_MESSAGE_UNIT];
- unsigned int buflen;
-
- /*
- * Number of bytes remaining until the current NH message reaches
- * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
- */
- unsigned int nh_remaining;
-
- __le64 nh_hash[NH_NUM_PASSES];
-};
-
-typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
- __le64 hash[NH_NUM_PASSES]);
-
-int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
-
-int crypto_nhpoly1305_init(struct shash_desc *desc);
-int crypto_nhpoly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen);
-int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
- const u8 *src, unsigned int srclen,
- nh_t nh_fn);
-int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
-int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
- nh_t nh_fn);
-
-#endif /* _NHPOLY1305_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 81098e00c08f..4c5199b20338 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -43,9 +43,11 @@ extern void public_key_free(struct public_key *key);
struct public_key_signature {
struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u8 *digest;
+ u8 *m; /* Message data to pass to verifier */
u32 s_size; /* Number of bytes in signature */
- u32 digest_size; /* Number of bytes in digest */
+ u32 m_size; /* Number of bytes in ->m */
+ bool m_free; /* T if ->m needs freeing */
+ bool algo_takes_data; /* T if public key algo operates on data, not a hash */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
diff --git a/include/cxl/event.h b/include/cxl/event.h
index 6fd90f9cc203..ff97fea718d2 100644
--- a/include/cxl/event.h
+++ b/include/cxl/event.h
@@ -320,4 +320,26 @@ static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data
}
#endif
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err);
+int cxl_cper_setup_prot_err_work_data(struct cxl_cper_prot_err_work_data *wd,
+ struct cxl_cper_sec_prot_err *prot_err,
+ int severity);
+#else
+static inline int
+cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+cxl_cper_setup_prot_err_work_data(struct cxl_cper_prot_err_work_data *wd,
+ struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *wd);
+
#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
index 3f461f6b9bbf..3af12f82da2c 100644
--- a/include/drm/bridge/dw_hdmi_qp.h
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -34,5 +34,6 @@ struct dw_hdmi_qp_plat_data {
struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
struct drm_encoder *encoder,
const struct dw_hdmi_qp_plat_data *plat_data);
+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi);
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi);
#endif /* __DW_HDMI_QP__ */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index df2f24b950e4..14d2859f0bda 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -552,6 +552,22 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure. In most of the cases you should be using
+ * drm_dp_dpcd_read_byte() instead.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
* drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -570,12 +586,29 @@ static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
void *buffer, size_t size)
{
int ret;
+ size_t i;
+ u8 *buf = buffer;
ret = drm_dp_dpcd_read(aux, offset, buffer, size);
- if (ret < 0)
- return ret;
- if (ret < size)
- return -EPROTO;
+ if (ret >= 0) {
+ if (ret < size)
+ return -EPROTO;
+ return 0;
+ }
+
+ /*
+ * Workaround for USB-C hubs/adapters with buggy firmware that fail
+ * multi-byte AUX reads but work with single-byte reads.
+ * Known affected devices:
+ * - Lenovo USB-C to VGA adapter (VIA VL817, idVendor=17ef, idProduct=7217)
+ * - Dell DA310 USB-C hub (idVendor=413c, idProduct=c010)
+ * Attempt byte-by-byte reading as a fallback.
+ */
+ for (i = 0; i < size; i++) {
+ ret = drm_dp_dpcd_readb(aux, offset + i, &buf[i]);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -610,22 +643,6 @@ static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
}
/**
- * drm_dp_dpcd_readb() - read a single byte from the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to read
- * @valuep: location where the value of the register will be stored
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure. In most of the cases you should be using
- * drm_dp_dpcd_read_byte() instead.
- */
-static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
- unsigned int offset, u8 *valuep)
-{
- return drm_dp_dpcd_read(aux, offset, valuep, 1);
-}
-
-/**
* drm_dp_dpcd_writeb() - write a single byte to the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to write
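
A hedged sketch of a caller relying on drm_dp_dpcd_read_data()'s 0-on-success convention and the new byte-by-byte fallback; DP_DPCD_REV and DP_RECEIVER_CAP_SIZE are assumed to come from <drm/display/drm_dp.h>:

static int example_read_dpcd_caps(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	/*
	 * Returns 0 on success (unlike drm_dp_dpcd_read()) and transparently
	 * retries single-byte reads on the quirky hubs listed above.
	 */
	return drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
}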
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 53382fe93537..e154ee4f0696 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -60,6 +60,12 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
+void drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_crtc_disable(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
@@ -89,8 +95,24 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
void
drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
+
+void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+void drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+void drm_atomic_helper_commit_crtc_enable(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+void drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 0ff7ab4aa868..dbafe136833f 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -176,33 +176,17 @@ struct drm_bridge_funcs {
/**
* @disable:
*
- * The @disable callback should disable the bridge.
+ * This callback should disable the bridge. It is called right before
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @disable vfunc. If the preceding element is a &drm_encoder
+ * it's called right before the &drm_encoder_helper_funcs.disable,
+ * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
+ * hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- *
- * If the preceding element is a &drm_bridge, then this is called before
- * that bridge is disabled via one of:
- *
- * - &drm_bridge_funcs.disable
- * - &drm_bridge_funcs.atomic_disable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called before the encoder is disabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_disable
- * - &drm_encoder_helper_funcs.prepare
- * - &drm_encoder_helper_funcs.disable
- * - &drm_encoder_helper_funcs.dpms
- *
- * and the CRTC is disabled via one of:
- *
- * - &drm_crtc_helper_funcs.prepare
- * - &drm_crtc_helper_funcs.atomic_disable
- * - &drm_crtc_helper_funcs.disable
- * - &drm_crtc_helper_funcs.dpms.
- *
* The @disable callback is optional.
*
* NOTE:
@@ -215,34 +199,17 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
- * The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding this bridge is no longer running when the
- * @post_disable is called.
+ * This callback should disable the bridge. It is called right after the
+ * preceding element in the display pipe is disabled. If the preceding
+ * element is a bridge this means it's called after that bridge's
+ * @post_disable function. If the preceding element is a &drm_encoder
+ * it's called right after the encoder's
+ * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
+ * or &drm_encoder_helper_funcs.dpms hook.
*
- * This callback should perform all the actions required by the hardware
- * after it has stopped receiving signals from the preceding element.
- *
- * If the preceding element is a &drm_bridge, then this is called after
- * that bridge is post-disabled (unless marked otherwise by the
- * @pre_enable_prev_first flag) via one of:
- *
- * - &drm_bridge_funcs.post_disable
- * - &drm_bridge_funcs.atomic_post_disable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called after the encoder is disabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_disable
- * - &drm_encoder_helper_funcs.prepare
- * - &drm_encoder_helper_funcs.disable
- * - &drm_encoder_helper_funcs.dpms
- *
- * and the CRTC is disabled via one of:
- *
- * - &drm_crtc_helper_funcs.prepare
- * - &drm_crtc_helper_funcs.atomic_disable
- * - &drm_crtc_helper_funcs.disable
- * - &drm_crtc_helper_funcs.dpms
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is no longer running when this callback is
+ * called.
*
* The @post_disable callback is optional.
*
@@ -285,30 +252,18 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
- * The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when the @pre_enable is called.
- *
- * This callback should perform all the necessary actions to prepare the
- * bridge to accept signals from the preceding element.
- *
- * If the preceding element is a &drm_bridge, then this is called before
- * that bridge is pre-enabled (unless marked otherwise by
- * @pre_enable_prev_first flag) via one of:
- *
- * - &drm_bridge_funcs.pre_enable
- * - &drm_bridge_funcs.atomic_pre_enable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called before the CRTC is enabled via one of:
- *
- * - &drm_crtc_helper_funcs.atomic_enable
- * - &drm_crtc_helper_funcs.commit
- *
- * and the encoder is enabled via one of:
+ * This callback should enable the bridge. It is called right before
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @pre_enable function. If the preceding element is a
+ * &drm_encoder it's called right before the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
*
- * - &drm_encoder_helper_funcs.atomic_enable
- * - &drm_encoder_helper_funcs.enable
- * - &drm_encoder_helper_funcs.commit
+ * The display pipe (i.e. clocks and timing signals) feeding this bridge
+ * will not yet be running when this callback is called. The bridge must
+ * not enable the display link feeding the next bridge in the chain (if
+ * there is one) when this callback is called.
*
* The @pre_enable callback is optional.
*
@@ -322,31 +277,19 @@ struct drm_bridge_funcs {
/**
* @enable:
*
- * The @enable callback should enable the bridge.
+ * This callback should enable the bridge. It is called right after
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's @enable function. If the preceding element is a
+ * &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * If the preceding element is a &drm_bridge, then this is called after
- * that bridge is enabled via one of:
- *
- * - &drm_bridge_funcs.enable
- * - &drm_bridge_funcs.atomic_enable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called after the CRTC is enabled via one of:
- *
- * - &drm_crtc_helper_funcs.atomic_enable
- * - &drm_crtc_helper_funcs.commit
- *
- * and the encoder is enabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_enable
- * - &drm_encoder_helper_funcs.enable
- * - drm_encoder_helper_funcs.commit
- *
* The @enable callback is optional.
*
* NOTE:
@@ -359,30 +302,17 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
- * The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when the @atomic_pre_enable is called.
- *
- * This callback should perform all the necessary actions to prepare the
- * bridge to accept signals from the preceding element.
- *
- * If the preceding element is a &drm_bridge, then this is called before
- * that bridge is pre-enabled (unless marked otherwise by
- * @pre_enable_prev_first flag) via one of:
- *
- * - &drm_bridge_funcs.pre_enable
- * - &drm_bridge_funcs.atomic_pre_enable
+ * This callback should enable the bridge. It is called right before
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
+ * element is a &drm_encoder it's called right before the encoder's
+ * &drm_encoder_helper_funcs.atomic_enable hook.
*
- * If the preceding element of the bridge is a display controller, then
- * this callback is called before the CRTC is enabled via one of:
- *
- * - &drm_crtc_helper_funcs.atomic_enable
- * - &drm_crtc_helper_funcs.commit
- *
- * and the encoder is enabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_enable
- * - &drm_encoder_helper_funcs.enable
- * - &drm_encoder_helper_funcs.commit
+ * The display pipe (i.e. clocks and timing signals) feeding this bridge
+ * will not yet be running when this callback is called. The bridge must
+ * not enable the display link feeding the next bridge in the chain (if
+ * there is one) when this callback is called.
*
* The @atomic_pre_enable callback is optional.
*/
@@ -392,31 +322,18 @@ struct drm_bridge_funcs {
/**
* @atomic_enable:
*
- * The @atomic_enable callback should enable the bridge.
+ * This callback should enable the bridge. It is called right after
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's @atomic_enable or @enable function. If the preceding element
+ * is a &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.atomic_enable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * If the preceding element is a &drm_bridge, then this is called after
- * that bridge is enabled via one of:
- *
- * - &drm_bridge_funcs.enable
- * - &drm_bridge_funcs.atomic_enable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called after the CRTC is enabled via one of:
- *
- * - &drm_crtc_helper_funcs.atomic_enable
- * - &drm_crtc_helper_funcs.commit
- *
- * and the encoder is enabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_enable
- * - &drm_encoder_helper_funcs.enable
- * - drm_encoder_helper_funcs.commit
- *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -424,32 +341,16 @@ struct drm_bridge_funcs {
/**
* @atomic_disable:
*
- * The @atomic_disable callback should disable the bridge.
+ * This callback should disable the bridge. It is called right before
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @atomic_disable or @disable vfunc. If the preceding element
+ * is a &drm_encoder it's called right before the
+ * &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * If the preceding element is a &drm_bridge, then this is called before
- * that bridge is disabled via one of:
- *
- * - &drm_bridge_funcs.disable
- * - &drm_bridge_funcs.atomic_disable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called before the encoder is disabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_disable
- * - &drm_encoder_helper_funcs.prepare
- * - &drm_encoder_helper_funcs.disable
- * - &drm_encoder_helper_funcs.dpms
- *
- * and the CRTC is disabled via one of:
- *
- * - &drm_crtc_helper_funcs.prepare
- * - &drm_crtc_helper_funcs.atomic_disable
- * - &drm_crtc_helper_funcs.disable
- * - &drm_crtc_helper_funcs.dpms.
- *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -458,34 +359,16 @@ struct drm_bridge_funcs {
/**
* @atomic_post_disable:
*
- * The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding this bridge is no longer running when the
- * @atomic_post_disable is called.
- *
- * This callback should perform all the actions required by the hardware
- * after it has stopped receiving signals from the preceding element.
+ * This callback should disable the bridge. It is called right after the
+ * preceding element in the display pipe is disabled. If the preceding
+ * element is a bridge this means it's called after that bridge's
+ * @atomic_post_disable or @post_disable function. If the preceding
+ * element is a &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.atomic_disable hook.
*
- * If the preceding element is a &drm_bridge, then this is called after
- * that bridge is post-disabled (unless marked otherwise by the
- * @pre_enable_prev_first flag) via one of:
- *
- * - &drm_bridge_funcs.post_disable
- * - &drm_bridge_funcs.atomic_post_disable
- *
- * If the preceding element of the bridge is a display controller, then
- * this callback is called after the encoder is disabled via one of:
- *
- * - &drm_encoder_helper_funcs.atomic_disable
- * - &drm_encoder_helper_funcs.prepare
- * - &drm_encoder_helper_funcs.disable
- * - &drm_encoder_helper_funcs.dpms
- *
- * and the CRTC is disabled via one of:
- *
- * - &drm_crtc_helper_funcs.prepare
- * - &drm_crtc_helper_funcs.atomic_disable
- * - &drm_crtc_helper_funcs.disable
- * - &drm_crtc_helper_funcs.dpms
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is no longer running when this callback is
+ * called.
*
* The @atomic_post_disable callback is optional.
*/
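
The reworked kernel-doc above describes the enable/disable ordering purely in prose. A minimal bridge-driver skeleton illustrating that ordering might look like the sketch below; it assumes the atomic hooks take the full &struct drm_atomic_state (older kernels pass a &struct drm_bridge_state instead), and all bodies are placeholders:

static void example_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					     struct drm_atomic_state *state)
{
	/* Upstream pipe not yet running: power up, but keep the output link off. */
}

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	/* Upstream pipe is running: start the link feeding the next bridge. */
}

static void example_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	/* Upstream pipe still running: stop scanning out. */
}

static void example_bridge_atomic_post_disable(struct drm_bridge *bridge,
					       struct drm_atomic_state *state)
{
	/* Upstream pipe is stopped: safe to power down completely. */
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.atomic_pre_enable	= example_bridge_atomic_pre_enable,
	.atomic_enable		= example_bridge_atomic_enable,
	.atomic_disable		= example_bridge_atomic_disable,
	.atomic_post_disable	= example_bridge_atomic_post_disable,
};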
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 70a7991f784f..eb29e5309f0a 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -209,6 +209,19 @@ struct drm_pagemap_devmem_ops {
struct dma_fence *pre_migrate_fence);
};
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+#else
+
+static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+ return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
@@ -233,6 +246,8 @@ struct drm_pagemap_devmem {
struct dma_fence *pre_migrate_fence;
};
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
@@ -243,8 +258,6 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
-struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
-
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
@@ -256,4 +269,6 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
struct mm_struct *mm,
unsigned long timeslice_ms);
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
#endif
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
index 442f9e9037dc..7a14dcb9f17b 100644
--- a/include/dt-bindings/clock/google,gs101.h
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -313,6 +313,42 @@
#define CLK_APM_PLL_DIV4_APM 70
#define CLK_APM_PLL_DIV16_APM 71
+/* CMU_DPU */
+#define CLK_MOUT_DPU_BUS_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_PCLK 3
+#define CLK_GOUT_DPU_CLK_DPU_OSCCLK_CLK 4
+#define CLK_GOUT_DPU_AD_APB_DPU_DMA_PCLKM 5
+#define CLK_GOUT_DPU_DPUF_ACLK_DMA 6
+#define CLK_GOUT_DPU_DPUF_ACLK_DPP 7
+#define CLK_GOUT_DPU_D_TZPC_DPU_PCLK 8
+#define CLK_GOUT_DPU_GPC_DPU_PCLK 9
+#define CLK_GOUT_DPU_LHM_AXI_P_DPU_I_CLK 10
+#define CLK_GOUT_DPU_LHS_AXI_D0_DPU_I_CLK 11
+#define CLK_GOUT_DPU_LHS_AXI_D1_DPU_I_CLK 12
+#define CLK_GOUT_DPU_LHS_AXI_D2_DPU_I_CLK 13
+#define CLK_GOUT_DPU_PPMU_DPUD0_ACLK 14
+#define CLK_GOUT_DPU_PPMU_DPUD0_PCLK 15
+#define CLK_GOUT_DPU_PPMU_DPUD1_ACLK 16
+#define CLK_GOUT_DPU_PPMU_DPUD1_PCLK 17
+#define CLK_GOUT_DPU_PPMU_DPUD2_ACLK 18
+#define CLK_GOUT_DPU_PPMU_DPUD2_PCLK 19
+#define CLK_GOUT_DPU_CLK_DPU_BUSD_CLK 20
+#define CLK_GOUT_DPU_CLK_DPU_BUSP_CLK 21
+#define CLK_GOUT_DPU_SSMT_DPU0_ACLK 22
+#define CLK_GOUT_DPU_SSMT_DPU0_PCLK 23
+#define CLK_GOUT_DPU_SSMT_DPU1_ACLK 24
+#define CLK_GOUT_DPU_SSMT_DPU1_PCLK 25
+#define CLK_GOUT_DPU_SSMT_DPU2_ACLK 26
+#define CLK_GOUT_DPU_SSMT_DPU2_PCLK 27
+#define CLK_GOUT_DPU_SYSMMU_DPUD0_CLK_S1 28
+#define CLK_GOUT_DPU_SYSMMU_DPUD0_CLK_S2 29
+#define CLK_GOUT_DPU_SYSMMU_DPUD1_CLK_S1 30
+#define CLK_GOUT_DPU_SYSMMU_DPUD1_CLK_S2 31
+#define CLK_GOUT_DPU_SYSMMU_DPUD2_CLK_S1 32
+#define CLK_GOUT_DPU_SYSMMU_DPUD2_CLK_S2 33
+#define CLK_GOUT_DPU_SYSREG_DPU_PCLK 34
+
/* CMU_HSI0 */
#define CLK_FOUT_USB_PLL 1
#define CLK_MOUT_PLL_USB 2
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
index 4e3897b3669d..4265460bfb30 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8917.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -194,6 +194,7 @@
#define GCC_QUSB2_PHY_BCR 2
#define GCC_USB_HS_BCR 3
#define GCC_USB2_HS_PHY_ONLY_BCR 4
+#define GCC_MDSS_BCR 5
/* GDSCs */
#define CPP_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
index 62aa12425592..d905804e6465 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -387,6 +387,9 @@
#define GCC_USB4_2_PHY_RX0_CLK_SRC 377
#define GCC_USB4_2_PHY_RX1_CLK_SRC 378
#define GCC_USB4_2_PHY_SYS_CLK_SRC 379
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 380
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 381
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 382
/* GCC power domains */
#define GCC_PCIE_0_TUNNEL_GDSC 0
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
index 2a805e06487b..c4863e444458 100644
--- a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -31,5 +31,8 @@
#define R9A09G077_ETCLKC 19
#define R9A09G077_ETCLKD 20
#define R9A09G077_ETCLKE 21
+#define R9A09G077_XSPI_CLK0 22
+#define R9A09G077_XSPI_CLK1 23
+#define R9A09G077_PCLKCAN 24
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
index 09da0ad33be6..0d53f1e65077 100644
--- a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -31,5 +31,8 @@
#define R9A09G087_ETCLKC 19
#define R9A09G087_ETCLKD 20
#define R9A09G087_ETCLKE 21
+#define R9A09G087_XSPI_CLK0 22
+#define R9A09G087_XSPI_CLK1 23
+#define R9A09G087_PCLKCAN 24
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
index 50e7c886709d..06851363ae0e 100644
--- a/include/dt-bindings/power/qcom,rpmhpd.h
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -264,5 +264,6 @@
#define SC8280XP_NSP 13
#define SC8280XP_QPHY 14
#define SC8280XP_XO 15
+#define SC8280XP_MXC_AO 16
#endif
diff --git a/include/dt-bindings/reset/spacemit,k3-resets.h b/include/dt-bindings/reset/spacemit,k3-resets.h
new file mode 100644
index 000000000000..79ac1c22b7b5
--- /dev/null
+++ b/include/dt-bindings/reset/spacemit,k3-resets.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 SpacemiT Technology Co. Ltd
+ */
+
+#ifndef _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_
+#define _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_
+
+/* MPMU resets */
+#define RESET_MPMU_WDT 0
+#define RESET_MPMU_RIPC 1
+
+/* APBC resets */
+#define RESET_APBC_UART0 0
+#define RESET_APBC_UART2 1
+#define RESET_APBC_UART3 2
+#define RESET_APBC_UART4 3
+#define RESET_APBC_UART5 4
+#define RESET_APBC_UART6 5
+#define RESET_APBC_UART7 6
+#define RESET_APBC_UART8 7
+#define RESET_APBC_UART9 8
+#define RESET_APBC_UART10 9
+#define RESET_APBC_GPIO 10
+#define RESET_APBC_PWM0 11
+#define RESET_APBC_PWM1 12
+#define RESET_APBC_PWM2 13
+#define RESET_APBC_PWM3 14
+#define RESET_APBC_PWM4 15
+#define RESET_APBC_PWM5 16
+#define RESET_APBC_PWM6 17
+#define RESET_APBC_PWM7 18
+#define RESET_APBC_PWM8 19
+#define RESET_APBC_PWM9 20
+#define RESET_APBC_PWM10 21
+#define RESET_APBC_PWM11 22
+#define RESET_APBC_PWM12 23
+#define RESET_APBC_PWM13 24
+#define RESET_APBC_PWM14 25
+#define RESET_APBC_PWM15 26
+#define RESET_APBC_PWM16 27
+#define RESET_APBC_PWM17 28
+#define RESET_APBC_PWM18 29
+#define RESET_APBC_PWM19 30
+#define RESET_APBC_SPI0 31
+#define RESET_APBC_SPI1 32
+#define RESET_APBC_SPI3 33
+#define RESET_APBC_RTC 34
+#define RESET_APBC_TWSI0 35
+#define RESET_APBC_TWSI1 36
+#define RESET_APBC_TWSI2 37
+#define RESET_APBC_TWSI4 38
+#define RESET_APBC_TWSI5 39
+#define RESET_APBC_TWSI6 40
+#define RESET_APBC_TWSI8 41
+#define RESET_APBC_TIMERS0 42
+#define RESET_APBC_TIMERS1 43
+#define RESET_APBC_TIMERS2 44
+#define RESET_APBC_TIMERS3 45
+#define RESET_APBC_TIMERS4 46
+#define RESET_APBC_TIMERS5 47
+#define RESET_APBC_TIMERS6 48
+#define RESET_APBC_TIMERS7 49
+#define RESET_APBC_AIB 50
+#define RESET_APBC_ONEWIRE 51
+#define RESET_APBC_I2S0 52
+#define RESET_APBC_I2S1 53
+#define RESET_APBC_I2S2 54
+#define RESET_APBC_I2S3 55
+#define RESET_APBC_I2S4 56
+#define RESET_APBC_I2S5 57
+#define RESET_APBC_DRO 58
+#define RESET_APBC_IR0 59
+#define RESET_APBC_IR1 60
+#define RESET_APBC_TSEN 61
+#define RESET_IPC_AP2AUD 62
+#define RESET_APBC_CAN0 63
+#define RESET_APBC_CAN1 64
+#define RESET_APBC_CAN2 65
+#define RESET_APBC_CAN3 66
+#define RESET_APBC_CAN4 67
+
+/* APMU resets */
+#define RESET_APMU_CSI 0
+#define RESET_APMU_CCIC2PHY 1
+#define RESET_APMU_CCIC3PHY 2
+#define RESET_APMU_ISP_CIBUS 3
+#define RESET_APMU_DSI_ESC 4
+#define RESET_APMU_LCD 5
+#define RESET_APMU_V2D 6
+#define RESET_APMU_LCD_MCLK 7
+#define RESET_APMU_LCD_DSCCLK 8
+#define RESET_APMU_SC2_HCLK 9
+#define RESET_APMU_CCIC_4X 10
+#define RESET_APMU_CCIC1_PHY 11
+#define RESET_APMU_SDH_AXI 12
+#define RESET_APMU_SDH0 13
+#define RESET_APMU_SDH1 14
+#define RESET_APMU_SDH2 15
+#define RESET_APMU_USB2 16
+#define RESET_APMU_USB3_PORTA 17
+#define RESET_APMU_USB3_PORTB 18
+#define RESET_APMU_USB3_PORTC 19
+#define RESET_APMU_USB3_PORTD 20
+#define RESET_APMU_QSPI 21
+#define RESET_APMU_QSPI_BUS 22
+#define RESET_APMU_DMA 23
+#define RESET_APMU_AES_WTM 24
+#define RESET_APMU_MCB_DCLK 25
+#define RESET_APMU_MCB_ACLK 26
+#define RESET_APMU_VPU 27
+#define RESET_APMU_DTC 28
+#define RESET_APMU_GPU 29
+#define RESET_APMU_ALZO 30
+#define RESET_APMU_MC 31
+#define RESET_APMU_CPU0_POP 32
+#define RESET_APMU_CPU0_SW 33
+#define RESET_APMU_CPU1_POP 34
+#define RESET_APMU_CPU1_SW 35
+#define RESET_APMU_CPU2_POP 36
+#define RESET_APMU_CPU2_SW 37
+#define RESET_APMU_CPU3_POP 38
+#define RESET_APMU_CPU3_SW 39
+#define RESET_APMU_C0_MPSUB_SW 40
+#define RESET_APMU_CPU4_POP 41
+#define RESET_APMU_CPU4_SW 42
+#define RESET_APMU_CPU5_POP 43
+#define RESET_APMU_CPU5_SW 44
+#define RESET_APMU_CPU6_POP 45
+#define RESET_APMU_CPU6_SW 46
+#define RESET_APMU_CPU7_POP 47
+#define RESET_APMU_CPU7_SW 48
+#define RESET_APMU_C1_MPSUB_SW 49
+#define RESET_APMU_MPSUB_DBG 50
+#define RESET_APMU_UCIE 51
+#define RESET_APMU_RCPU 52
+#define RESET_APMU_DSI4LN2_ESCCLK 53
+#define RESET_APMU_DSI4LN2_LCD_SW 54
+#define RESET_APMU_DSI4LN2_LCD_MCLK 55
+#define RESET_APMU_DSI4LN2_LCD_DSCCLK 56
+#define RESET_APMU_DSI4LN2_DPU_ACLK 57
+#define RESET_APMU_DPU_ACLK 58
+#define RESET_APMU_UFS_ACLK 59
+#define RESET_APMU_EDP0 60
+#define RESET_APMU_EDP1 61
+#define RESET_APMU_PCIE_PORTA 62
+#define RESET_APMU_PCIE_PORTB 63
+#define RESET_APMU_PCIE_PORTC 64
+#define RESET_APMU_PCIE_PORTD 65
+#define RESET_APMU_PCIE_PORTE 66
+#define RESET_APMU_EMAC0 67
+#define RESET_APMU_EMAC1 68
+#define RESET_APMU_EMAC2 69
+#define RESET_APMU_ESPI_MCLK 70
+#define RESET_APMU_ESPI_SCLK 71
+
+/* DCIU resets */

+#define RESET_DCIU_HDMA 0
+#define RESET_DCIU_DMA350 1
+#define RESET_DCIU_DMA350_0 2
+#define RESET_DCIU_DMA350_1 3
+#define RESET_DCIU_AXIDMA0 4
+#define RESET_DCIU_AXIDMA1 5
+#define RESET_DCIU_AXIDMA2 6
+#define RESET_DCIU_AXIDMA3 7
+#define RESET_DCIU_AXIDMA4 8
+#define RESET_DCIU_AXIDMA5 9
+#define RESET_DCIU_AXIDMA6 10
+#define RESET_DCIU_AXIDMA7 11
+
+#endif /* _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_ */
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
index ddc7302a510a..350f98178b26 100644
--- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -7,6 +7,9 @@
#ifndef __MEDIATEK_LVTS_DT_H
#define __MEDIATEK_LVTS_DT_H
+#define MT7987_CPU 0
+#define MT7987_ETH2P5G 1
+
#define MT7988_CPU_0 0
#define MT7988_CPU_1 1
#define MT7988_ETH2P5G_0 2
@@ -80,4 +83,30 @@
#define MT8192_AP_MD1 15
#define MT8192_AP_MD2 16
+#define MT8196_MCU_MEDIUM_CPU6_0 0
+#define MT8196_MCU_MEDIUM_CPU6_1 1
+#define MT8196_MCU_DSU2 2
+#define MT8196_MCU_DSU3 3
+#define MT8196_MCU_LITTLE_CPU3 4
+#define MT8196_MCU_LITTLE_CPU0 5
+#define MT8196_MCU_LITTLE_CPU1 6
+#define MT8196_MCU_LITTLE_CPU2 7
+#define MT8196_MCU_MEDIUM_CPU4_0 8
+#define MT8196_MCU_MEDIUM_CPU4_1 9
+#define MT8196_MCU_MEDIUM_CPU5_0 10
+#define MT8196_MCU_MEDIUM_CPU5_1 11
+#define MT8196_MCU_DSU0 12
+#define MT8196_MCU_DSU1 13
+#define MT8196_MCU_BIG_CPU7_0 14
+#define MT8196_MCU_BIG_CPU7_1 15
+
+#define MT8196_AP_TOP0 0
+#define MT8196_AP_TOP1 1
+#define MT8196_AP_TOP2 2
+#define MT8196_AP_TOP3 3
+#define MT8196_AP_BOT0 4
+#define MT8196_AP_BOT1 5
+#define MT8196_AP_BOT2 6
+#define MT8196_AP_BOT3 7
+
#endif /* __MEDIATEK_LVTS_DT_H */
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 04b18d0e37af..30fbbde81c5c 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -578,9 +578,12 @@ struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
struct hv_tlb_flush_ex {
u64 address_space;
u64 flags;
- struct hv_vpset hv_vp_set;
- u64 gva_list[];
+ __TRAILING_OVERLAP(struct hv_vpset, hv_vp_set, bank_contents, __packed,
+ u64 gva_list[];
+ );
} __packed;
+static_assert(offsetof(struct hv_tlb_flush_ex, hv_vp_set.bank_contents) ==
+ offsetof(struct hv_tlb_flush_ex, gva_list));
struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
volatile u32 tsc_sequence;
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
index 469186df7826..08965970c17d 100644
--- a/include/hyperv/hvhdk.h
+++ b/include/hyperv/hvhdk.h
@@ -800,6 +800,53 @@ struct hv_x64_memory_intercept_message {
u8 instruction_bytes[16];
} __packed;
+#if IS_ENABLED(CONFIG_ARM64)
+union hv_arm64_vp_execution_state {
+ u16 as_uint16;
+ struct {
+ u16 cpl:2; /* Exception Level (EL) */
+ u16 debug_active:1;
+ u16 interruption_pending:1;
+ u16 vtl:4;
+ u16 virtualization_fault_active:1;
+ u16 reserved:7;
+ } __packed;
+};
+
+struct hv_arm64_intercept_message_header {
+ u32 vp_index;
+ u8 instruction_length;
+ u8 intercept_access_type;
+ union hv_arm64_vp_execution_state execution_state;
+ u64 pc;
+ u64 cpsr;
+} __packed;
+
+union hv_arm64_memory_access_info {
+ u8 as_uint8;
+ struct {
+ u8 gva_valid:1;
+ u8 gva_gpa_valid:1;
+ u8 hypercall_output_pending:1;
+ u8 reserved:5;
+ } __packed;
+};
+
+struct hv_arm64_memory_intercept_message {
+ struct hv_arm64_intercept_message_header header;
+ u32 cache_type; /* enum hv_cache_type */
+ u8 instruction_byte_count;
+ union hv_arm64_memory_access_info memory_access_info;
+ u16 reserved1;
+ u8 instruction_bytes[4];
+ u32 reserved2;
+ u64 guest_virtual_address;
+ u64 guest_physical_address;
+ u64 syndrome;
+} __packed;
+
+#endif /* CONFIG_ARM64 */
+
/*
* Dispatch state for the VP communicated by the hypervisor to the
* VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 5ec5182b5e57..9cd1594ab697 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -906,7 +906,8 @@ do { \
}; \
\
_KUNIT_SAVE_LOC(test); \
- if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
+ if (likely(!IS_ERR_OR_NULL(__left) && !IS_ERR_OR_NULL(__right) && \
+ (strcmp(__left, __right) op 0))) \
break; \
\
\
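
For illustration, a hedged sketch of a KUnit test exercising the new behaviour: an IS_ERR() pointer now fails the string assertion cleanly instead of being handed to strcmp(). The lookup result is faked for demonstration purposes:

static void example_streq_test(struct kunit *test)
{
	const char *name = ERR_PTR(-ENOENT);	/* e.g. a failed lookup */

	/* Previously this could dereference the ERR_PTR; now it just fails. */
	KUNIT_EXPECT_STREQ(test, name, "expected-name");
}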
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fbf0c3a65f59..3a412dcebc29 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -107,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_GIC_V5,
ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index d4ed5622cf2b..17bb3374f4ca 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -27,12 +27,15 @@ int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+struct fwnode_handle *iort_iwb_handle(u32 iwb_id);
#ifdef CONFIG_ACPI_IORT
u32 iort_msi_map_id(struct device *dev, u32 id);
+u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node);
+int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
+int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa);
void acpi_configure_pmsi_domain(struct device *dev);
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head);
@@ -46,9 +49,15 @@ phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
+static inline u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node)
+{ return id; }
+static inline int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
+{ return -ENODEV; }
static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
+static inline int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa)
+{ return -ENODEV; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
static inline
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 2f9d36b72bd8..cdc25f8979f7 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2121,7 +2121,7 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -2155,7 +2155,7 @@ raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -2189,7 +2189,7 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -2222,7 +2222,7 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -4247,7 +4247,7 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -4281,7 +4281,7 @@ raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -4315,7 +4315,7 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -4348,7 +4348,7 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// b565db590afeeff0d7c9485ccbca5bb6e155749f
+// 206314f82b8b73a5c3aa69cf7f35ac9e7b5d6b58
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 37ab6314a9f7..feb3b5dc3e96 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1269,7 +1269,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -1292,7 +1292,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -1314,7 +1314,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -1337,7 +1337,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -2847,7 +2847,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -2870,7 +2870,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -2892,7 +2892,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -2915,7 +2915,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4425,7 +4425,7 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -4448,7 +4448,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -4470,7 +4470,7 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -4493,7 +4493,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// f618ac667f868941a84ce0ab2242f1786e049ed4
+// 9dd948d3012b22c4e75933a5172983f912e46439
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index f86b29d90877..6a4e47d2db35 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1449,7 +1449,7 @@ raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -1473,7 +1473,7 @@ raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -1497,7 +1497,7 @@ raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -1521,7 +1521,7 @@ raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// eadf183c3600b8b92b91839dd3be6bcc560c752d
+// 4b882bf19018602c10816c52f8b4ae280adc887b
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 536f8ee8da81..a5be09c8497a 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -128,12 +128,6 @@ enum audit_nfcfgop {
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
-/* only for compat system calls */
-extern unsigned compat_write_class[];
-extern unsigned compat_read_class[];
-extern unsigned compat_dir_class[];
-extern unsigned compat_chattr_class[];
-extern unsigned compat_signal_class[];
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -195,6 +189,8 @@ extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
+extern int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto);
extern int audit_update_lsm_rules(void);
@@ -272,6 +268,12 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
+static inline int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto)
+{
+ return 0;
+}
+
static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
{
return INVALID_UID;
@@ -316,7 +318,6 @@ extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
-extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
@@ -380,12 +381,6 @@ static inline void audit_syscall_exit(void *pt_regs)
__audit_syscall_exit(success, return_code);
}
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- if (unlikely(!audit_dummy_context()))
- return __audit_reusename(name);
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{
if (unlikely(!audit_dummy_context()))
@@ -624,10 +619,6 @@ static inline struct audit_context *audit_context(void)
{
return NULL;
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{ }
static inline void audit_inode(struct filename *name,
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
index 0e34d673ef17..2b8153791e6a 100644
--- a/include/linux/audit_arch.h
+++ b/include/linux/audit_arch.h
@@ -23,4 +23,11 @@ enum auditsc_class_t {
extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index c75a9b3672aa..36a3f2275ecd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -256,12 +256,6 @@ static inline struct folio *bio_first_folio_all(struct bio *bio)
return page_folio(bio_first_page_all(bio));
}
-static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
-{
- WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- return &bio->bi_io_vec[bio->bi_vcnt - 1];
-}
-
/**
* struct folio_iter - State for iterating all folios in a bio.
* @folio: The current folio we're iterating. NULL after the last folio.
@@ -403,6 +397,29 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
return iov_iter_npages(iter, max_segs);
}
+/**
+ * bio_iov_bounce_nr_vecs - calculate number of bvecs for a bounce bio
+ * @iter: iter to bounce from
+ * @op: REQ_OP_* for the bio
+ *
+ * Calculates how many bvecs are needed for the next bio to bounce from/to
+ * @iter.
+ */
+static inline unsigned short
+bio_iov_bounce_nr_vecs(struct iov_iter *iter, blk_opf_t op)
+{
+ /*
+	 * We still need to bounce bvec iters, so unlike in
+	 * bio_iov_vecs_to_alloc, don't special-case them here.
+	 *
+	 * For reads we also need a vector for the bounce buffer; account
+	 * for that here.
+ */
+ if (op_is_write(op))
+ return iov_iter_npages(iter, BIO_MAX_VECS);
+ return iov_iter_npages(iter, BIO_MAX_VECS - 1) + 1;
+}
+
struct request_queue;
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
@@ -414,6 +431,7 @@ static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
}
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
+void bio_reuse(struct bio *bio, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
@@ -456,6 +474,9 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
+void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
+
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
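[Editor's illustration, not part of this diff: a minimal sketch of sizing a bio that will be bounced from an iov_iter using the new bio_iov_bounce_nr_vecs() helper documented above. The function name alloc_bounce_bio and the GFP_KERNEL allocation context are assumptions for the example.]

/* Sketch only: size a bio for bouncing; one extra bvec is reserved for reads. */
static struct bio *alloc_bounce_bio(struct block_device *bdev,
				    struct iov_iter *iter, blk_opf_t opf)
{
	unsigned short nr_vecs = bio_iov_bounce_nr_vecs(iter, opf);

	return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
}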
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index c0989b5b0407..7869a6e59b6a 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -7,6 +7,18 @@
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <asm/processor.h> /* for cpu_relax() */
+
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context lock type that we can cast to with the __bitlock(bitnum, addr)
+ * macro below, which gives the static analysis a unique instance for each
+ * (bit, addr) pair.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr) (struct __context_bitlock *)(bitnum + (addr))
+
/*
* bit-based spin_lock()
*
@@ -14,6 +26,7 @@
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
/*
* Assuming the lock is uncontended, this never enters
@@ -32,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -47,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
return 0;
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
@@ -55,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -63,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
@@ -72,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -80,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
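[Editor's illustration, not part of this diff: with the __bitlock() token above, helpers that expect the bit lock to be held can be annotated and checked. A hedged sketch, assuming a caller-defined flags word whose bit 0 doubles as the lock bit; struct obj and the two helpers are hypothetical.]

struct obj {
	unsigned long flags;		/* bit 0 doubles as a bit_spinlock */
	int counter;
};

/* The context analysis checks that callers hold __bitlock(0, &o->flags). */
static void obj_update_locked(struct obj *o)
	__must_hold(__bitlock(0, &o->flags))
{
	o->counter++;
}

static void obj_update(struct obj *o)
{
	bit_spin_lock(0, &o->flags);	/* acquires __bitlock(0, &o->flags) */
	obj_update_locked(o);
	bit_spin_unlock(0, &o->flags);	/* releases the same token instance */
}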
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 126dc5b380af..54aeeef1f0ec 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>
@@ -243,7 +244,7 @@ __MAKE_OP(64)
#define __field_prep(mask, val) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __val = (val); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
@@ -252,7 +253,7 @@ __MAKE_OP(64)
#define __field_get(mask, reg) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __reg = (reg); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
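[Editor's illustration, not part of this diff: what the two internal helpers above compute for a runtime (non-constant) mask; demo_field_helpers() is a hypothetical name.]

/*
 * Sketch only: with mask 0x0000ff00, __ffs(mask) == 8, so the value is
 * shifted into the masked bits and then extracted again.
 */
static u32 demo_field_helpers(void)
{
	u32 mask = 0x0000ff00;
	u32 reg = __field_prep(mask, 0x12);	/* reg == 0x00001200 */

	return __field_get(mask, reg);		/* returns 0x12 */
}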
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 58b0c5254a67..f7c3cb4a342f 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -132,6 +132,11 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return bio->bi_crypt_context;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
gfp_t gfp_mask);
@@ -169,8 +174,35 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return false;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return NULL;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio that may have a crypto context
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypto context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. For writes this means submitting newly allocated bios
+ * for the encrypted payload while holding back the source bio until they
+ * complete; for reads the decryption happens in place via a hooked-in
+ * completion handler.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+ if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+ submit_bio(bio);
+}
+
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
/**
* bio_crypt_clone - clone bio encryption context
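[Editor's illustration, not part of this diff: a hedged sketch of a submission path that attaches an inline-encryption context and then calls the new wrapper instead of a plain submit_bio(); submit_encrypted_bio() and the GFP_NOFS choice are assumptions, not taken from this patch.]

static void submit_encrypted_bio(struct bio *bio,
				 const struct blk_crypto_key *key,
				 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	bio_crypt_set_ctx(bio, key, dun, GFP_NOFS);
	/* Falls back to the crypto API if the hardware can't handle the key. */
	blk_crypto_submit_bio(bio);
}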
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index a6b84206eb94..c15b1ac62765 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -91,7 +91,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
}
-static inline bool blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
return rq->cmd_flags & REQ_INTEGRITY;
}
@@ -168,9 +168,9 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
{
return 0;
}
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
- return 0;
+ return false;
}
static inline struct bio_vec rq_integrity_vec(struct request *rq)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index cb88fc791fbd..214c181ff2c9 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -28,7 +28,7 @@ struct blk_dma_iter {
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter);
+ struct blk_dma_iter *iter);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cae9e857aea4..18a2388ba581 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,6 +13,7 @@
struct blk_mq_tags;
struct blk_flush_queue;
+struct io_comp_batch;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
@@ -22,7 +23,8 @@ enum rq_end_io_ret {
RQ_END_IO_FREE,
};
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+ const struct io_comp_batch *);
/*
* request flags */
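[Editor's illustration, not part of this diff: a sketch of an end_io callback converted to the new rq_end_io_fn signature, which now also receives the completion batch; the callback body and the use of end_io_data are illustrative.]

static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t error,
				    const struct io_comp_batch *iob)
{
	struct completion *done = rq->end_io_data;

	complete(done);
	return RQ_END_IO_NONE;
}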
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5dc061d318a4..d59553324a84 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -232,6 +232,8 @@ struct bio {
atomic_t __bi_remaining;
+ /* The actual vec list, preserved by bio_reset() */
+ struct bio_vec *bi_io_vec;
struct bvec_iter bi_iter;
union {
@@ -275,13 +277,12 @@ struct bio {
atomic_t __bi_cnt; /* pin count */
- struct bio_vec *bi_io_vec; /* the actual vec list */
-
struct bio_set *bi_pool;
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
-#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+#define BIO_MAX_SIZE UINT_MAX /* max value of bi_iter.bi_size */
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> SECTOR_SHIFT)
static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 72e34acd439c..99ef8cd7673c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,14 +340,13 @@ typedef unsigned int __bitwise blk_features_t;
/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES ((__force blk_features_t)(1u << 14))
+
/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
-/* atomic writes enabled */
-#define BLK_FEAT_ATOMIC_WRITES \
- ((__force blk_features_t)(1u << 16))
-
/*
* Flags automatically inherited when stacking limits.
*/
@@ -551,7 +550,8 @@ struct request_queue {
/*
* queue settings
*/
- unsigned long nr_requests; /* Max # of requests */
+ unsigned int nr_requests; /* Max # of requests */
+ unsigned int async_depth; /* Max # of async requests */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -681,7 +681,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q) \
((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1026,7 +1026,7 @@ extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-/* Helper to convert REQ_OP_XXX to its string format XXX */
+/* Convert a request operation REQ_OP_name into the string "name" */
extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
@@ -1044,7 +1044,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_queue; /* this is never NULL */
}
-/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+/* Convert a zone condition BLK_ZONE_COND_name into the string "name" */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
static inline unsigned int bio_zone_no(struct bio *bio)
@@ -1462,9 +1462,14 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
+static inline bool bdev_rot(struct block_device *bdev)
+{
+ return blk_queue_rot(bdev_get_queue(bdev));
+}
+
static inline bool bdev_nonrot(struct block_device *bdev)
{
- return blk_queue_nonrot(bdev_get_queue(bdev));
+ return !bdev_rot(bdev);
}
static inline bool bdev_synchronous(struct block_device *bdev)
@@ -1822,6 +1827,7 @@ struct io_comp_batch {
struct rq_list req_list;
bool need_ts;
void (*complete)(struct io_comp_batch *);
+ void *poll_ctx;
};
static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d1eb5c7729cb..2f535331f926 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
@@ -470,7 +470,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
- void *value) {
+ void *value, u64 flags) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e5be698256d1..cd9b96434904 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -287,6 +287,7 @@ struct bpf_map_owner {
enum bpf_prog_type type;
bool jited;
bool xdp_has_frags;
+ bool sleepable;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
@@ -673,6 +674,22 @@ void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
struct bpf_dynptr *ptr__uninit);
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt, int node_id,
+ u64 flags);
+void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt);
+#else
+static inline void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt,
+ int node_id, u64 flags)
+{
+ return NULL;
+}
+
+static inline void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt)
+{
+}
+#endif
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
@@ -737,7 +754,7 @@ enum bpf_type_flag {
MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
/* PTR was passed from the kernel in a trusted context, and may be
- * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * passed to kfuncs or BPF helper functions.
* Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
* PTR_UNTRUSTED refers to a kptr that was read directly from a map
* without invoking bpf_kptr_xchg(). What we really need to know is
@@ -1213,6 +1230,9 @@ enum {
#endif
};
+#define BPF_TRAMP_COOKIE_INDEX_SHIFT 8
+#define BPF_TRAMP_IS_RETURN_SHIFT 63
+
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
int nr_links;
@@ -1293,6 +1313,7 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
+ BPF_TRAMP_FSESSION,
};
struct bpf_tramp_image {
@@ -1309,14 +1330,17 @@ struct bpf_tramp_image {
};
struct bpf_trampoline {
- /* hlist for trampoline_table */
- struct hlist_node hlist;
+ /* hlist for trampoline_key_table */
+ struct hlist_node hlist_key;
+ /* hlist for trampoline_ip_table */
+ struct hlist_node hlist_ip;
struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
u32 flags;
u64 key;
+ unsigned long ip;
struct {
struct btf_func_model model;
void *addr;
@@ -1418,7 +1442,7 @@ bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
void *src, u64 len, u64 flags);
void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
- void *buffer__opt, u64 buffer__szk);
+ void *buffer__nullable, u64 buffer__szk);
static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
{
@@ -1742,8 +1766,12 @@ struct bpf_prog_aux {
struct rcu_head rcu;
};
struct bpf_stream stream[2];
+ struct mutex st_ops_assoc_mutex;
+ struct bpf_map __rcu *st_ops_assoc;
};
+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1759,6 +1787,7 @@ struct bpf_prog {
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1, /* Do we call get_func_ip() */
+ call_session_cookie:1, /* Do we call bpf_session_cookie() */
tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
sleepable:1; /* BPF program is sleepable */
enum bpf_prog_type type; /* Type of BPF program */
@@ -1770,7 +1799,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
};
struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
+ u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -1855,6 +1884,11 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_fsession_link {
+ struct bpf_tracing_link link;
+ struct bpf_tramp_link fexit;
+};
+
struct bpf_raw_tp_link {
struct bpf_link link;
struct bpf_raw_event_map *btp;
@@ -2002,6 +2036,40 @@ struct bpf_struct_ops_common_value {
enum bpf_struct_ops_state state;
};
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+ u32 val;
+
+ preempt_disable();
+ active[rctx]++;
+ val = le32_to_cpu(*(__le32 *)active);
+ preempt_enable();
+ if (val != BIT(rctx * 8))
+ return false;
+
+ return true;
+#else
+ return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+
+ preempt_disable();
+ active[rctx]--;
+ preempt_enable();
+#else
+ this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
+}
+
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developer to register a struct_ops type and generate
* type information correctly. Developers should use this macro to register
@@ -2044,6 +2112,9 @@ static inline void bpf_module_put(const void *data, struct module *owner)
module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
+int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog);
+void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux);
u32 bpf_struct_ops_id(const void *kdata);
#ifdef CONFIG_NET
@@ -2091,6 +2162,17 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
+static inline int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog)
+{
+}
+static inline void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux)
+{
+ return NULL;
+}
static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
}
@@ -2101,6 +2183,37 @@ static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_op
#endif
+static inline int bpf_fsession_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (fentries.links[i]->link.prog->expected_attach_type == BPF_TRACE_FSESSION)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static inline bool bpf_prog_calls_session_cookie(struct bpf_tramp_link *link)
+{
+ return link->link.prog->call_session_cookie;
+}
+
+static inline int bpf_fsession_cookie_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (bpf_prog_calls_session_cookie(fentries.links[i]))
+ cnt++;
+ }
+
+ return cnt;
+}
+
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt);
@@ -2540,6 +2653,10 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG
+void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg);
+void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg);
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
@@ -2564,6 +2681,17 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
kvcalloc(_n, _size, _flags)
#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
__alloc_percpu_gfp(_size, _align, _flags)
+static inline void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg)
+{
+ *new_memcg = NULL;
+ *old_memcg = NULL;
+}
+
+static inline void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg)
+{
+}
#endif
static inline int
@@ -2764,8 +2892,8 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
-int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3243,6 +3371,11 @@ static inline void bpf_prog_report_arena_violation(bool write, unsigned long add
}
#endif /* CONFIG_BPF_SYSCALL */
+static inline bool bpf_net_capable(void)
+{
+ return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
+}
+
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
@@ -3832,14 +3965,43 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
}
#endif
+static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
+{
+ switch (map_type) {
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
{
- if (flags & ~allowed_flags)
+ u32 cpu;
+
+ if ((u32)flags & ~allowed_flags)
return -EINVAL;
if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
return -EINVAL;
+ if (!(flags & BPF_F_CPU) && flags >> 32)
+ return -EINVAL;
+
+ if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+ if (!bpf_map_supports_cpu_flags(map->map_type))
+ return -EINVAL;
+ if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS))
+ return -EINVAL;
+
+ cpu = flags >> 32;
+ if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
+ return -ERANGE;
+ }
+
return 0;
}
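[Editor's illustration, not part of this diff: how a caller would encode the target CPU in the upper 32 bits of the flags validated by bpf_map_check_op_flags() above; make_percpu_flags() is a hypothetical helper.]

static u64 make_percpu_flags(u32 cpu, bool all_cpus)
{
	if (all_cpus)
		return BPF_F_ALL_CPUS;
	/* The CPU index is only honoured together with BPF_F_CPU. */
	return BPF_F_CPU | ((u64)cpu << 32);
}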
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 66432248cd81..85efa9772530 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -15,12 +15,13 @@
#include <linux/types.h>
#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>
+#include <asm/rqspinlock.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
struct bpf_local_storage_map_bucket {
struct hlist_head list;
- raw_spinlock_t lock;
+ rqspinlock_t lock;
};
/* Thp map is not the primary owner of a bpf_local_storage_elem.
@@ -67,6 +68,11 @@ struct bpf_local_storage_data {
u8 data[] __aligned(8);
};
+#define SELEM_MAP_UNLINKED (1 << 0)
+#define SELEM_STORAGE_UNLINKED (1 << 1)
+#define SELEM_UNLINKED (SELEM_MAP_UNLINKED | SELEM_STORAGE_UNLINKED)
+#define SELEM_TOFREE (1 << 2)
+
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
@@ -79,7 +85,9 @@ struct bpf_local_storage_elem {
* after raw_spin_unlock
*/
};
- /* 8 bytes hole */
+ atomic_t state;
+ bool use_kmalloc_nolock;
+ /* 3 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
@@ -88,13 +96,14 @@ struct bpf_local_storage_elem {
struct bpf_local_storage {
struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
- struct bpf_local_storage_map __rcu *smap;
struct hlist_head list; /* List of bpf_local_storage_elem */
void *owner; /* The object that owns the above "list" of
* bpf_local_storage_elem.
*/
struct rcu_head rcu;
- raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ rqspinlock_t lock; /* Protect adding/removing from the "list" */
+ u64 mem_charge; /* Copy of mem charged to owner. Protected by "lock" */
+ refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
bool use_kmalloc_nolock;
};
@@ -162,11 +171,10 @@ bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
return SDATA(selem);
}
-void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
void bpf_local_storage_map_free(struct bpf_map *map,
- struct bpf_local_storage_cache *cache,
- int __percpu *busy_counter);
+ struct bpf_local_storage_cache *cache);
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
const struct btf *btf,
@@ -176,10 +184,11 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem);
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+int bpf_selem_unlink(struct bpf_local_storage_elem *selem);
-void bpf_selem_link_map(struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *selem);
+int bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
index 929225f7b095..0b9f4caeeb0a 100644
--- a/include/linux/bpf_mprog.h
+++ b/include/linux/bpf_mprog.h
@@ -340,4 +340,14 @@ static inline bool bpf_mprog_supported(enum bpf_prog_type type)
return false;
}
}
+
+static inline bool bpf_mprog_detach_empty(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return bpf_net_capable();
+ default:
+ return false;
+ }
+}
#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 130bcbd66f60..ef8e45a362d9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -147,8 +147,12 @@ struct bpf_reg_state {
* registers. Example:
* r1 = r2; both will have r1->id == r2->id == N
* r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ * r3 = r2; both will have r3->id == r2->id == N
+ * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->off == 10
*/
-#define BPF_ADD_CONST (1U << 31)
+#define BPF_ADD_CONST64 (1U << 31)
+#define BPF_ADD_CONST32 (1U << 30)
+#define BPF_ADD_CONST (BPF_ADD_CONST64 | BPF_ADD_CONST32)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -692,12 +696,16 @@ struct bpf_id_pair {
struct bpf_idmap {
u32 tmp_id_gen;
+ u32 cnt;
struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};
struct bpf_idset {
- u32 count;
- u32 ids[BPF_ID_MAP_SIZE];
+ u32 num_ids;
+ struct {
+ u32 id;
+ u32 cnt;
+ } entries[BPF_ID_MAP_SIZE];
};
/* see verifier.c:compute_scc_callchain() */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f06976ffb63f..48108471c5b1 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -34,7 +34,7 @@
*
* And the following kfunc:
*
- * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE)
*
* All invocations to the kfunc must pass the unmodified, unwalked task:
*
@@ -66,7 +66,6 @@
* return 0;
* }
*/
-#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
@@ -79,6 +78,7 @@
#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
+#define KF_IMPLICIT_ARGS (1 << 16) /* kfunc has implicit arguments supplied by the verifier */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -220,6 +220,7 @@ bool btf_is_module(const struct btf *btf);
bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+u32 btf_named_start_id(const struct btf *btf, bool own);
struct btf *btf_base_btf(const struct btf *btf);
bool btf_type_is_i32(const struct btf_type *t);
bool btf_type_is_i64(const struct btf_type *t);
@@ -575,8 +576,8 @@ const char *btf_name_by_offset(const struct btf *btf, u32 offset);
const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
-u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
- const struct bpf_prog *prog);
+u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
+bool btf_kfunc_is_allowed(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
const struct bpf_prog *prog);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 831c1b4b626c..7acc06b22fb7 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,7 +7,10 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
+struct file;
+
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size);
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 8afa92d15a66..1e99fda2b380 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -46,6 +46,12 @@
#include <linux/list.h>
#include <linux/netdevice.h>
+/* exposed CAN device capabilities for network layer */
+#define CAN_CAP_CC BIT(0) /* CAN CC aka Classical CAN */
+#define CAN_CAP_FD BIT(1) /* CAN FD */
+#define CAN_CAP_XL BIT(2) /* CAN XL */
+#define CAN_CAP_RO BIT(3) /* read-only mode (LISTEN/RESTRICTED) */
+
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
@@ -64,6 +70,7 @@ struct can_ml_priv {
#ifdef CAN_J1939
struct j1939_priv *j1939_priv;
#endif
+ u32 can_cap;
};
static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
@@ -77,4 +84,21 @@ static inline void can_set_ml_priv(struct net_device *dev,
netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
}
+static inline bool can_cap_enabled(struct net_device *dev, u32 cap)
+{
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ if (!can_ml)
+ return false;
+
+ return (can_ml->can_cap & cap);
+}
+
+static inline void can_set_cap(struct net_device *dev, u32 cap)
+{
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ can_ml->can_cap = cap;
+}
+
#endif /* CAN_ML_H */
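[Editor's illustration, not part of this diff: a hedged sketch of a driver advertising its capabilities and a receive-path check using them; the function names are hypothetical.]

static void my_candev_setup_caps(struct net_device *dev)
{
	/* Classical CAN and CAN FD capable; CAN_CAP_RO would mark a read-only mode. */
	can_set_cap(dev, CAN_CAP_CC | CAN_CAP_FD);
}

static bool my_can_rx_accepts_fd(struct net_device *dev)
{
	return can_cap_enabled(dev, CAN_CAP_FD);
}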
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index f6416a56e95d..6d0710d6f571 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -111,18 +111,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
-#if IS_ENABLED(CONFIG_CAN_NETLINK)
struct can_priv *safe_candev_priv(struct net_device *dev);
-#else
-static inline struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- return NULL;
-}
-#endif
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
void can_set_default_mtu(struct net_device *dev);
+void can_set_cap_info(struct net_device *dev);
int __must_check can_set_static_ctrlmode(struct net_device *dev,
u32 static_mode);
int can_hwtstamp_get(struct net_device *netdev,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c7f2c63b3bc3..08e5dbe15ca4 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -31,6 +31,12 @@
#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
+/*
+ * name for "old" CephFS file systems,
+ * see ceph.git e2b151d009640114b2565c901d6f41f6cd5ec652
+ */
+#define CEPH_OLD_FS_NAME "cephfs"
+
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index b760a3c470a5..f7cc60de0058 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -626,7 +626,13 @@ struct cgroup {
#endif
/* All ancestors including self */
- struct cgroup *ancestors[];
+ union {
+ DECLARE_FLEX_ARRAY(struct cgroup *, ancestors);
+ struct {
+ struct cgroup *_root_ancestor;
+ DECLARE_FLEX_ARRAY(struct cgroup *, _low_ancestors);
+ };
+ };
};
/*
@@ -647,16 +653,6 @@ struct cgroup_root {
struct list_head root_list;
struct rcu_head rcu; /* Must be near the top */
- /*
- * The root cgroup. The containing cgroup_root will be destroyed on its
- * release. cgrp->ancestors[0] will be used overflowing into the
- * following field. cgrp_ancestor_storage must immediately follow.
- */
- struct cgroup cgrp;
-
- /* must follow cgrp for cgrp->ancestors[0], see above */
- struct cgroup *cgrp_ancestor_storage;
-
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -668,6 +664,13 @@ struct cgroup_root {
/* The name for this hierarchy - may be empty */
char name[MAX_CGROUP_ROOT_NAMELEN];
+
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. This must be embedded last due to flexible array at the end
+ * of struct cgroup.
+ */
+ struct cgroup cgrp;
};
/*
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 8d41b917c77d..dbc4162921e9 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -278,16 +278,21 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
+typedef _type lock_##_name##_t; \
static __always_inline void class_##_name##_destructor(_type *p) \
+ __no_context_analysis \
{ _type _T = *p; _exit; } \
static __always_inline _type class_##_name##_constructor(_init_args) \
+ __no_context_analysis \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef lock_##_name##_t lock_##_name##ext##_t; \
typedef class_##_name##_t class_##_name##ext##_t; \
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ __no_context_analysis \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
@@ -474,35 +479,80 @@ _label: \
*/
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef _type lock_##_name##_t; \
typedef struct { \
_type *lock; \
__VA_ARGS__; \
} class_##_name##_t; \
\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+ __no_context_analysis \
{ \
if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
-#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+#define __DEFINE_LOCK_GUARD_1(_name, _type, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
-#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+#define __DEFINE_LOCK_GUARD_0(_name, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
+#define DECLARE_LOCK_GUARD_0_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(void) _lock;\
+static inline void class_##_name##_destructor(class_##_name##_t *_T) _unlock;
+
+/*
+ * To support Context Analysis, we need to allow the compiler to see the
+ * acquisition and release of the context lock. However, the "cleanup" helpers
+ * wrap the lock in a struct passed through separate helper functions, which
+ * hides the lock alias from the compiler (no inter-procedural analysis).
+ *
+ * To make it work, we introduce an explicit alias to the context lock instance
+ * that is "cleaned" up with a separate cleanup helper. This helper is a dummy
+ * function that does nothing at runtime, but has the "_unlock" attribute to
+ * tell the compiler what happens at the end of the scope.
+ *
+ * To generalize the pattern, the WITH_LOCK_GUARD_1_ATTRS() macro should be used
+ * to redefine the constructor, which then also creates the alias variable with
+ * the right "cleanup" attribute, *after* DECLARE_LOCK_GUARD_1_ATTRS() has been
+ * used.
+ *
+ * Example usage:
+ *
+ * DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+ * #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+ *
+ * Note: To support the for-loop based scoped helpers, the auxiliary variable
+ * must be a pointer to the "class" type because it is defined in the same
+ * statement as the guard variable. However, we initialize it with the lock
+ * pointer (despite the type mismatch, the compiler's alias analysis still works
+ * as expected). The "_unlock" attribute receives a pointer to the auxiliary
+ * variable (a double pointer to the class type), and must be cast and
+ * dereferenced appropriately.
+ */
+#define DECLARE_LOCK_GUARD_1_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T) _lock;\
+static __always_inline void __class_##_name##_cleanup_ctx(class_##_name##_t **_T) \
+ __no_context_analysis _unlock { }
+#define WITH_LOCK_GUARD_1_ATTRS(_name, _T) \
+ class_##_name##_constructor(_T), \
+ *__UNIQUE_ID(unlock) __cleanup(__class_##_name##_cleanup_ctx) = (void *)(unsigned long)(_T)
+
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
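[Editor's illustration, not part of this diff: assuming the mutex attributes are wired up as in the comment above (DECLARE_LOCK_GUARD_1_ATTRS plus WITH_LOCK_GUARD_1_ATTRS), a guard() user gets checked access to __guarded_by members; struct counter is hypothetical.]

struct counter {
	struct mutex lock;
	int value __guarded_by(&lock);
};

static void counter_inc(struct counter *c)
{
	guard(mutex)(&c->lock);	/* constructor + cleanup alias carry the context */
	c->value++;		/* accepted: the guard scope holds c->lock */
}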
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..2e6931735880 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e
extern void cma_reserve_pages_on_error(struct cma *cma);
+#ifdef CONFIG_DMA_CMA
+extern bool cma_skip_dt_default_reserved_mem(void);
+#else
+static inline bool cma_skip_dt_default_reserved_mem(void)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
new file mode 100644
index 000000000000..00c074a2ccb0
--- /dev/null
+++ b/include/linux/compiler-context-analysis.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Macros and attributes for compiler-based static context analysis.
+ */
+
+#ifndef _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+#define _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+
+#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__) && !defined(__GENKSYMS__)
+
+/*
+ * These attributes define new context lock (Clang: capability) types.
+ * Internal only.
+ */
+# define __ctx_lock_type(name) __attribute__((capability(#name)))
+# define __reentrant_ctx_lock __attribute__((reentrant_capability))
+# define __acquires_ctx_lock(...) __attribute__((acquire_capability(__VA_ARGS__)))
+# define __acquires_shared_ctx_lock(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
+# define __try_acquires_ctx_lock(ret, var) __attribute__((try_acquire_capability(ret, var)))
+# define __try_acquires_shared_ctx_lock(ret, var) __attribute__((try_acquire_shared_capability(ret, var)))
+# define __releases_ctx_lock(...) __attribute__((release_capability(__VA_ARGS__)))
+# define __releases_shared_ctx_lock(...) __attribute__((release_shared_capability(__VA_ARGS__)))
+# define __returns_ctx_lock(var) __attribute__((lock_returned(var)))
+
+/*
+ * The below are used to annotate code being checked. Internal only.
+ */
+# define __excludes_ctx_lock(...) __attribute__((locks_excluded(__VA_ARGS__)))
+# define __requires_ctx_lock(...) __attribute__((requires_capability(__VA_ARGS__)))
+# define __requires_shared_ctx_lock(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
+
+/*
+ * The "assert_capability" attribute is a bit confusingly named. It does not
+ * generate a check. Instead, it tells the analysis to *assume* the capability
+ * is held. This is used to augment runtime assertions, which can then help
+ * with patterns beyond the compiler's static reasoning abilities.
+ */
+# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
+# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
+
+/**
+ * __guarded_by - struct member and globals attribute, declares variable
+ * only accessible within active context
+ *
+ * Declares that the struct member or global variable is only accessible within
+ * the context entered by the given context lock. Read operations on the data
+ * require shared access, while write operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long counter __guarded_by(&lock);
+ * };
+ */
+# define __guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
+
+/**
+ * __pt_guarded_by - struct member and globals attribute, declares pointed-to
+ * data only accessible within active context
+ *
+ * Declares that the data pointed to by the struct member pointer or global
+ * pointer is only accessible within the context entered by the given context
+ * lock. Read operations on the data require shared access, while write
+ * operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long *counter __pt_guarded_by(&lock);
+ * };
+ */
+# define __pt_guarded_by(...) __attribute__((pt_guarded_by(__VA_ARGS__)))
+
+/**
+ * context_lock_struct() - declare or define a context lock struct
+ * @name: struct name
+ *
+ * Helper to declare or define a struct type that is also a context lock.
+ *
+ * .. code-block:: c
+ *
+ * context_lock_struct(my_handle) {
+ * int foo;
+ * long bar;
+ * };
+ *
+ * struct some_state {
+ * ...
+ * };
+ * // ... declared elsewhere ...
+ * context_lock_struct(some_state);
+ *
+ * Note: The implementation defines several helper functions that can acquire
+ * and release the context lock.
+ */
+# define context_lock_struct(name, ...) \
+ struct __ctx_lock_type(name) __VA_ARGS__ name; \
+ static __always_inline void __acquire_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_ctx_lock(var) { } \
+ static __always_inline void __acquire_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_shared_ctx_lock(var) { } \
+ static __always_inline bool __try_acquire_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline bool __try_acquire_shared_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_shared_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline void __release_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_ctx_lock(var) { } \
+ static __always_inline void __release_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_shared_ctx_lock(var) { } \
+ static __always_inline void __assume_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_ctx_lock(var) { } \
+ static __always_inline void __assume_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_shared_ctx_lock(var) { } \
+ struct name
+
+/**
+ * disable_context_analysis() - disables context analysis
+ *
+ * Disables context analysis. Must be paired with a later
+ * enable_context_analysis().
+ */
+# define disable_context_analysis() \
+ __diag_push(); \
+ __diag_ignore_all("-Wunknown-warning-option", "") \
+ __diag_ignore_all("-Wthread-safety", "") \
+ __diag_ignore_all("-Wthread-safety-pointer", "")
+
+/**
+ * enable_context_analysis() - re-enables context analysis
+ *
+ * Re-enables context analysis. Must be paired with a prior
+ * disable_context_analysis().
+ */
+# define enable_context_analysis() __diag_pop()
+
+/**
+ * __no_context_analysis - function attribute, disables context analysis
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Prefer use of `context_unsafe()` where possible.
+ */
+# define __no_context_analysis __attribute__((no_thread_safety_analysis))
+
+#else /* !WARN_CONTEXT_ANALYSIS */
+
+# define __ctx_lock_type(name)
+# define __reentrant_ctx_lock
+# define __acquires_ctx_lock(...)
+# define __acquires_shared_ctx_lock(...)
+# define __try_acquires_ctx_lock(ret, var)
+# define __try_acquires_shared_ctx_lock(ret, var)
+# define __releases_ctx_lock(...)
+# define __releases_shared_ctx_lock(...)
+# define __assumes_ctx_lock(...)
+# define __assumes_shared_ctx_lock(...)
+# define __returns_ctx_lock(var)
+# define __guarded_by(...)
+# define __pt_guarded_by(...)
+# define __excludes_ctx_lock(...)
+# define __requires_ctx_lock(...)
+# define __requires_shared_ctx_lock(...)
+# define __acquire_ctx_lock(var) do { } while (0)
+# define __acquire_shared_ctx_lock(var) do { } while (0)
+# define __try_acquire_ctx_lock(var, ret) (ret)
+# define __try_acquire_shared_ctx_lock(var, ret) (ret)
+# define __release_ctx_lock(var) do { } while (0)
+# define __release_shared_ctx_lock(var) do { } while (0)
+# define __assume_ctx_lock(var) do { (void)(var); } while (0)
+# define __assume_shared_ctx_lock(var) do { (void)(var); } while (0)
+# define context_lock_struct(name, ...) struct __VA_ARGS__ name
+# define disable_context_analysis()
+# define enable_context_analysis()
+# define __no_context_analysis
+
+#endif /* WARN_CONTEXT_ANALYSIS */
+
+/**
+ * context_unsafe() - disable context checking for contained code
+ *
+ * Disables context checking for contained statements or expression.
+ *
+ * .. code-block:: c
+ *
+ * struct some_data {
+ * spinlock_t lock;
+ * int counter __guarded_by(&lock);
+ * };
+ *
+ * int foo(struct some_data *d)
+ * {
+ * // ...
+ * // other code that is still checked ...
+ * // ...
+ * return context_unsafe(d->counter);
+ * }
+ */
+#define context_unsafe(...) \
+({ \
+ disable_context_analysis(); \
+ __VA_ARGS__; \
+ enable_context_analysis() \
+})
+
+/**
+ * __context_unsafe() - function attribute, disable context checking
+ * @comment: comment explaining why opt-out is safe
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Forces adding an inline comment as argument.
+ */
+#define __context_unsafe(comment) __no_context_analysis
+
+/**
+ * context_unsafe_alias() - helper to insert a context lock "alias barrier"
+ * @p: pointer aliasing a context lock or object containing context locks
+ *
+ * No-op function that acts as a "context lock alias barrier", where the
+ * analysis rightfully detects that we're switching aliases, but the switch is
+ * considered safe, albeit beyond the analysis's reasoning abilities.
+ *
+ * This should be inserted before the first use of such an alias.
+ *
+ * Implementation Note: The compiler ignores aliases that may be reassigned but
+ * their value cannot be determined (e.g. when passing a non-const pointer to an
+ * alias as a function argument).
+ */
+#define context_unsafe_alias(p) _context_unsafe_alias((void **)&(p))
+static inline void _context_unsafe_alias(void **p) { }
+
+/**
+ * token_context_lock() - declare an abstract global context lock instance
+ * @name: token context lock name
+ *
+ * Helper that declares an abstract global context lock instance @name, but not
+ * backed by a real data structure (linker error if accidentally referenced).
+ * The type name is `__ctx_lock_@name`.
+ */
+#define token_context_lock(name, ...) \
+ context_lock_struct(__ctx_lock_##name, ##__VA_ARGS__) {}; \
+ extern const struct __ctx_lock_##name *name
+
+/**
+ * token_context_lock_instance() - declare another instance of a global context lock
+ * @ctx: token context lock previously declared with token_context_lock()
+ * @name: name of additional global context lock instance
+ *
+ * Helper that declares an additional instance @name of the same token context
+ * lock class @ctx. This is helpful where multiple related token contexts are
+ * declared, to allow using the same underlying type (`__ctx_lock_@ctx`) as
+ * function arguments.
+ */
+#define token_context_lock_instance(ctx, name) \
+ extern const struct __ctx_lock_##ctx *name
+
+/*
+ * Common keywords for static context analysis.
+ */
+
+/**
+ * __must_hold() - function attribute, caller must hold exclusive context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) exclusively.
+ */
+#define __must_hold(...) __requires_ctx_lock(__VA_ARGS__)
+
+/**
+ * __must_not_hold() - function attribute, caller must not hold context lock
+ *
+ * Function attribute declaring that the caller must not hold the given context
+ * lock instance(s).
+ */
+#define __must_not_hold(...) __excludes_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires() - function attribute, function acquires context lock exclusively
+ *
+ * Function attribute declaring that the function acquires the given context
+ * lock instance(s) exclusively, but does not release them.
+ */
+#define __acquires(...) __acquires_ctx_lock(__VA_ARGS__)
+
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_true(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_false(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonzero(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_0(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonnull(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_NULL(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+
+/**
+ * __cond_acquires() - function attribute, function conditionally
+ * acquires a context lock exclusively
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x exclusively, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires(ret, x) __cond_acquires_impl_##ret(x)
+
+/**
+ * __releases() - function attribute, function releases a context lock exclusively
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) exclusively. The associated context(s) must be active on
+ * entry.
+ */
+#define __releases(...) __releases_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire() - function to acquire context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x exclusively.
+ */
+#define __acquire(x) __acquire_ctx_lock(x)
+
+/**
+ * __release() - function to release context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x.
+ */
+#define __release(x) __release_ctx_lock(x)
+
+/**
+ * __must_hold_shared() - function attribute, caller must hold shared context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) with shared access.
+ */
+#define __must_hold_shared(...) __requires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires_shared() - function attribute, function acquires context lock shared
+ *
+ * Function attribute declaring that the function acquires the given
+ * context lock instance(s) with shared access, but does not release them.
+ */
+#define __acquires_shared(...) __acquires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __cond_acquires_shared() - function attribute, function conditionally
+ * acquires a context lock shared
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x with shared access, but does not release it.
+ * The function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires_shared(ret, x) __cond_acquires_impl_##ret(x, _shared)
+
+/**
+ * __releases_shared() - function attribute, function releases a
+ * context lock shared
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) with shared access. The associated context(s) must be
+ * active on entry.
+ */
+#define __releases_shared(...) __releases_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire_shared() - function to acquire context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x with shared
+ * access.
+ */
+#define __acquire_shared(x) __acquire_shared_ctx_lock(x)
+
+/**
+ * __release_shared() - function to release context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x with shared
+ * access.
+ */
+#define __release_shared(x) __release_shared_ctx_lock(x)
+
+/**
+ * __acquire_ret() - helper to acquire context lock of return value
+ * @call: call expression
+ * @ret_expr: acquire expression that uses __ret
+ */
+#define __acquire_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire(ret_expr); \
+ __ret; \
+ })
+
+/**
+ * __acquire_shared_ret() - helper to acquire context lock shared of return value
+ * @call: call expression
+ * @ret_expr: acquire shared expression that uses __ret
+ */
+#define __acquire_shared_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire_shared(ret_expr); \
+ __ret; \
+ })
+
+/*
+ * Attributes to mark functions returning acquired context locks.
+ *
+ * This is purely cosmetic to help readability, and should be used with the
+ * above macros as follows:
+ *
+ * struct foo { spinlock_t lock; ... };
+ * ...
+ * #define myfunc(...) __acquire_ret(_myfunc(__VA_ARGS__), &__ret->lock)
+ * struct foo *_myfunc(int bar) __acquires_ret;
+ * ...
+ */
+#define __acquires_ret __no_context_analysis
+#define __acquires_shared_ret __no_context_analysis
+
+#endif /* _LINUX_COMPILER_CONTEXT_ANALYSIS_H */
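/*
 * Illustrative sketch (not part of the patch): how the annotations above
 * might be used on a trylock-style API. 'struct foo', foo_trylock() and
 * foo_get_locked() are hypothetical names.
 */
struct foo {
	spinlock_t lock;
	int refs;
};

/* Returns true with f->lock held, false without it. */
bool foo_trylock(struct foo *f) __cond_acquires(true, &f->lock);

void foo_unlock(struct foo *f) __releases(&f->lock);

/* Look up an object and return it with its lock held. */
#define foo_get_locked(id) __acquire_ret(_foo_get_locked(id), &__ret->lock)
struct foo *_foo_get_locked(int id) __acquires_ret;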
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04487c9bd751..af16624b29fd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define data_race(expr) \
({ \
__kcsan_disable_current(); \
+ disable_context_analysis(); \
auto __v = (expr); \
+ enable_context_analysis(); \
__kcsan_enable_current(); \
__v; \
})
@@ -231,16 +233,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
"must be non-C-string (not NUL-terminated)")
/*
- * Use __typeof_unqual__() when available.
- *
- * XXX: Remove test for __CHECKER__ once
- * sparse learns about __typeof_unqual__().
- */
-#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
-# define USE_TYPEOF_UNQUAL 1
-#endif
-
-/*
* Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
* operator when available, to return an unqualified type of the exp.
*/
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d3318a3c2577..3c936b129860 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -41,6 +41,8 @@
# define BTF_TYPE_TAG(value) /* nothing */
#endif
+#include <linux/compiler-context-analysis.h>
+
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
@@ -51,14 +53,6 @@
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
-/* context/locking */
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __cond_acquires(x) __attribute__((context(x,0,-1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
@@ -79,14 +73,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
-/* context/locking */
-# define __must_hold(x)
-# define __acquires(x)
-# define __cond_acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
@@ -369,7 +355,7 @@ struct ftrace_likely_data {
* Optional: only supported since clang >= 18
*
* gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
- * clang: https://github.com/llvm/llvm-project/pull/76348
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
*
* __bdos on clang < 19.1.2 can erroneously return 0:
* https://github.com/llvm/llvm-project/pull/110497
@@ -384,6 +370,22 @@ struct ftrace_likely_data {
#endif
/*
+ * Track at runtime the number of objects pointed to by a pointer member,
+ * for use by CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS.
+ *
+ * Optional: only supported since gcc >= 16
+ * Optional: only supported since clang >= 22
+ *
+ * gcc: https://gcc.gnu.org/pipermail/gcc-patches/2025-April/681727.html
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY_PTR
+#define __counted_by_ptr(member) __attribute__((__counted_by__(member)))
+#else
+#define __counted_by_ptr(member)
+#endif
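/*
 * Illustrative sketch (not part of the patch): annotating a pointer member
 * so the bounds of the pointed-to buffer are tied to another member.
 * 'struct pkt' is a hypothetical example.
 */
struct pkt {
	size_t nr_words;
	u32 *words __counted_by_ptr(nr_words);
};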
+
+/*
* Optional: only supported since gcc >= 15
* Optional: not supported by Clang
*
@@ -536,6 +538,37 @@ struct ftrace_likely_data {
#endif
/*
+ * Optional: only supported since gcc >= 15, clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fcounted_005fby_005fref
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#builtin-counted-by-ref
+ */
+#if __has_builtin(__builtin_counted_by_ref)
+/**
+ * __flex_counter() - Get pointer to counter member for the given
+ * flexible array, if it was annotated with __counted_by()
+ * @FAM: Pointer to flexible array member of an addressable struct instance
+ *
+ * For example, with:
+ *
+ * struct foo {
+ * int counter;
+ * short array[] __counted_by(counter);
+ * } *p;
+ *
+ * __flex_counter(p->array) will resolve to &p->counter.
+ *
+ * Note that Clang may not allow this to be assigned to a separate
+ * variable; it must be used directly.
+ *
+ * If p->array is unannotated, this returns (void *)NULL.
+ */
+#define __flex_counter(FAM) __builtin_counted_by_ref(FAM)
+#else
+#define __flex_counter(FAM) ((void *)NULL)
+#endif
+
+/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
@@ -562,6 +595,14 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
+#ifndef __ASSEMBLY__
+/*
+ * Use __typeof_unqual__() when available.
+ */
+#if CC_HAS_TYPEOF_UNQUAL || defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -569,6 +610,7 @@ struct ftrace_likely_data {
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
+#ifndef USE_TYPEOF_UNQUAL
/*
* Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
@@ -586,6 +628,29 @@ struct ftrace_likely_data {
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
+#else
+#define __unqual_scalar_typeof(x) __typeof_unqual__(x)
+#endif
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * __signed_scalar_typeof(x) - Declare a signed scalar type, leaving
+ * non-scalar types unchanged.
+ */
+
+#define __scalar_type_to_signed_cases(type) \
+ unsigned type: (signed type)0, \
+ signed type: (signed type)0
+
+#define __signed_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (signed char)0, \
+ __scalar_type_to_signed_cases(char), \
+ __scalar_type_to_signed_cases(short), \
+ __scalar_type_to_signed_cases(int), \
+ __scalar_type_to_signed_cases(long), \
+ __scalar_type_to_signed_cases(long long), \
+ default: (x)))
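/*
 * Illustrative sketch (not part of the patch): __signed_scalar_typeof() maps
 * an unsigned scalar to its signed counterpart, e.g. to compute a signed
 * delta of two unsigned sequence counters. seq_delta() is a hypothetical name.
 */
static inline long seq_delta(unsigned long a, unsigned long b)
{
	__signed_scalar_typeof(a) d = a - b;	/* d has type 'signed long' */

	return d;
}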
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
diff --git a/include/linux/console.h b/include/linux/console.h
index fc9f5c5c1b04..f882833bedf0 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -492,8 +492,8 @@ static inline bool console_srcu_read_lock_is_held(void)
extern int console_srcu_read_lock(void);
extern void console_srcu_read_unlock(int cookie);
-extern void console_list_lock(void) __acquires(console_mutex);
-extern void console_list_unlock(void) __releases(console_mutex);
+extern void console_list_lock(void);
+extern void console_list_unlock(void);
extern struct hlist_head console_list;
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 5b1236d8c65b..440b35e459e5 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -595,7 +595,8 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc);
+ const struct cper_sec_proc_arm *proc,
+ u32 length);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0465d1e6f72a..cc894fc38971 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -203,6 +203,7 @@ struct cpufreq_freqs {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
+struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
@@ -210,6 +211,10 @@ static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
return NULL;
}
+static inline struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
+{
+ return NULL;
+}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
index f7aa20f62b87..286b3ab92e15 100644
--- a/include/linux/cpuhplock.h
+++ b/include/linux/cpuhplock.h
@@ -13,6 +13,7 @@
struct device;
extern int lockdep_is_cpus_held(void);
+extern int lockdep_is_cpus_write_held(void);
#ifdef CONFIG_HOTPLUG_CPU
void cpus_write_lock(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a98d3330385c..a4aa2f1767d0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,6 +18,8 @@
#include <linux/mmu_context.h>
#include <linux/jump_label.h>
+extern bool lockdep_is_cpuset_held(void);
+
#ifdef CONFIG_CPUSETS
/*
@@ -77,7 +79,6 @@ extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
-extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -213,11 +214,6 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
-static inline bool cpuset_cpu_is_isolated(int cpu)
-{
- return false;
-}
-
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 343a140a6ba2..ed1609d78cd7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -164,7 +164,6 @@ static inline const struct cred *kernel_cred(void)
return rcu_dereference_raw(init_task.cred);
}
extern int set_security_override(struct cred *, u32);
-extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7cecda29447e..4177c4738282 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -239,18 +239,16 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
* @cancel: callback to call
* @cancel_data: extra data for the callback to call
*/
-struct debugfs_cancellation {
+context_lock_struct(debugfs_cancellation) {
struct list_head list;
void (*cancel)(struct dentry *, void *);
void *cancel_data;
};
-void __acquires(cancellation)
-debugfs_enter_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
-void __releases(cancellation)
-debugfs_leave_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __acquires(cancellation);
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __releases(cancellation);
#else
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index cd8e0f0a634b..bbc67ec513ed 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -85,6 +85,8 @@ enum probe_type {
* uevent.
* @p: Driver core's private data, no one other than the driver
* core can touch this.
+ * @p_cb: Callbacks private to the driver core; no one other than the
+ * driver core is allowed to touch this.
*
* The device driver-model tracks all of the drivers known to the system.
* The main reason for this tracking is to enable the driver core to match
@@ -119,6 +121,13 @@ struct device_driver {
void (*coredump) (struct device *dev);
struct driver_private *p;
+ struct {
+ /*
+ * Called after remove() and after all devres entries have been
+ * processed. This is a Rust-only callback.
+ */
+ void (*post_unbind_rust)(struct device *dev);
+ } p_cb;
};
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0864773a57e8..822085bc2d20 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -21,7 +21,7 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
return 0;
- if (likely(!inode->i_rdev))
+ if (!inode->i_rdev)
return 0;
if (S_ISBLK(inode->i_mode))
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2a43094e23f7..664898d09ff5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -406,11 +406,12 @@ void efi_native_runtime_setup(void);
#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
/*
- * This GUID is used to pass to the kernel proper the struct screen_info
- * structure that was populated by the stub based on the GOP protocol instance
- * associated with ConOut
+ * This GUID is used to pass to the kernel proper the primary
+ * display information populated by the stub based on the GOP
+ * instance associated with ConOut.
*/
-#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_PRIMARY_DISPLAY_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 43aa6153dc57..e7497f804644 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -18,7 +18,7 @@
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
- * energy calculation. Equal to: power * max_frequency / frequency
+ * energy calculation. Equal to: 10 * power * max_frequency / frequency
* @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 87efb38b7081..f83ca0abf2cd 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
+#include <linux/audit.h>
#include <linux/irq-entry-common.h>
#include <linux/livepatch.h>
#include <linux/ptrace.h>
@@ -36,8 +37,8 @@
SYSCALL_WORK_SYSCALL_EMU | \
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_RSEQ_SLICE | \
ARCH_SYSCALL_WORK_ENTER)
-
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -45,7 +46,84 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
+/**
+ * arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper
+ *
+ * Invoked from syscall_trace_enter() to wrap ptrace_report_syscall_entry().
+ *
+ * This allows architecture specific ptrace_report_syscall_entry()
+ * implementations. If not defined by the architecture this falls back
+ * to ptrace_report_syscall_entry().
+ */
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs);
+
+#ifndef arch_ptrace_report_syscall_entry
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
+{
+ return ptrace_report_syscall_entry(regs);
+}
+#endif
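/*
 * Illustrative sketch (not part of the patch): an architecture that needs to
 * do extra work before reporting would provide its own version, typically
 * from its asm/entry-common.h. arch_fixup_syscall_args() is hypothetical.
 */
#define arch_ptrace_report_syscall_entry arch_ptrace_report_syscall_entry
static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
{
	arch_fixup_syscall_args(regs);
	return ptrace_report_syscall_entry(regs);
}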
+
+bool syscall_user_dispatch(struct pt_regs *regs);
+long trace_syscall_enter(struct pt_regs *regs, long syscall);
+void trace_syscall_exit(struct pt_regs *regs, long ret);
+
+static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
+{
+ if (unlikely(audit_context())) {
+ unsigned long args[6];
+
+ syscall_get_arguments(current, regs, args);
+ audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
+ }
+}
+
+static __always_inline long syscall_trace_enter(struct pt_regs *regs, unsigned long work)
+{
+ long syscall, ret = 0;
+
+ /*
+ * Handle Syscall User Dispatch. This must come first, since
+ * the ABI here can be something that doesn't make sense for
+ * other syscall_work features.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (syscall_user_dispatch(regs))
+ return -1L;
+ }
+
+ /*
+ * User space got a time slice extension granted and relinquishes
+ * the CPU. The work stops the slice timer to avoid an extra round
+ * through hrtimer_interrupt().
+ */
+ if (work & SYSCALL_WORK_SYSCALL_RSEQ_SLICE)
+ rseq_syscall_enter_work(syscall_get_nr(current, regs));
+
+ /* Handle ptrace */
+ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
+ ret = arch_ptrace_report_syscall_entry(regs);
+ if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
+ return -1L;
+ }
+
+ /* Do seccomp after ptrace, to catch any tracer changes. */
+ if (work & SYSCALL_WORK_SECCOMP) {
+ ret = __secure_computing();
+ if (ret == -1L)
+ return ret;
+ }
+
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ syscall = trace_syscall_enter(regs, syscall);
+
+ syscall_enter_audit(regs, syscall);
+
+ return ret ? : syscall;
+}
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -75,7 +153,7 @@ static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *re
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
if (work & SYSCALL_WORK_ENTER)
- syscall = syscall_trace_enter(regs, syscall, work);
+ syscall = syscall_trace_enter(regs, work);
return syscall;
}
@@ -112,6 +190,37 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
return ret;
}
+/*
+ * If SYSCALL_EMU is set, then the only reason to report is when
+ * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
+ * instruction has already been reported in syscall_enter_from_user_mode().
+ */
+static __always_inline bool report_single_step(unsigned long work)
+{
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
+ return false;
+
+ return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
+}
+
+/**
+ * arch_ptrace_report_syscall_exit - Architecture specific ptrace_report_syscall_exit()
+ *
+ * This allows architecture specific ptrace_report_syscall_exit()
+ * implementations. If not defined by the architecture this falls back
+ * to ptrace_report_syscall_exit().
+ */
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step);
+
+#ifndef arch_ptrace_report_syscall_exit
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step)
+{
+ ptrace_report_syscall_exit(regs, step);
+}
+#endif
+
/**
* syscall_exit_work - Handle work before returning to user mode
* @regs: Pointer to current pt_regs
@@ -119,20 +228,40 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
*
* Do one-time syscall specific work.
*/
-void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+static __always_inline void syscall_exit_work(struct pt_regs *regs, unsigned long work)
+{
+ bool step;
+
+ /*
+ * If the syscall was rolled back due to syscall user dispatching,
+ * then the tracers below are not invoked, for the same reason the
+ * entry side was not invoked in syscall_trace_enter(): the ABI of
+ * these syscalls is unknown.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (unlikely(current->syscall_dispatch.on_dispatch)) {
+ current->syscall_dispatch.on_dispatch = false;
+ return;
+ }
+ }
+
+ audit_syscall_exit(regs);
+
+ if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
+ trace_syscall_exit(regs, syscall_get_return_value(current, regs));
+
+ step = report_single_step(work);
+ if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
+ arch_ptrace_report_syscall_exit(regs, step);
+}
/**
- * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * syscall_exit_to_user_mode_work - Handle one time work before returning to user mode
 * @regs: Pointer to current's pt_regs
*
- * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
- * exit_to_user_mode() to perform the final transition to user mode.
+ * Step 1 of syscall_exit_to_user_mode() with the same calling convention.
*
- * Calling convention is the same as for syscall_exit_to_user_mode() and it
- * returns with all work handled and interrupts disabled. The caller must
- * invoke exit_to_user_mode() before actually switching to user mode to
- * make the final state transitions. Interrupts must stay disabled between
- * return from this function and the invocation of exit_to_user_mode().
+ * The caller must invoke steps 2-3 of syscall_exit_to_user_mode() afterwards.
*/
static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
@@ -155,15 +284,13 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
*/
if (unlikely(work & SYSCALL_WORK_EXIT))
syscall_exit_work(regs, work);
- local_irq_disable_exit_to_user();
- syscall_exit_to_user_mode_prepare(regs);
}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
*
- * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * Invoked with interrupts enabled and fully valid @regs. Returns with all
* work handled, interrupts disabled such that the caller can immediately
* switch to user mode. Called from architecture specific syscall and ret
* from fork code.
@@ -176,6 +303,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* - ptrace (single stepping)
*
* 2) Preparatory work
+ * - Disable interrupts
* - Exit to user mode loop (common TIF handling). Invokes
* arch_exit_to_user_mode_work() for architecture specific TIF work
* - Architecture specific one time work arch_exit_to_user_mode_prepare()
@@ -184,14 +312,17 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
* functionality in exit_to_user_mode().
*
- * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
- * exit_to_user_mode(). This function is preferred unless there is a
- * compelling architectural reason to use the separate functions.
+ * This is a combination of syscall_exit_to_user_mode_work() (1), disabling
+ * interrupts followed by syscall_exit_to_user_mode_prepare() (2) and
+ * exit_to_user_mode() (3). This function is preferred unless there is a
+ * compelling architectural reason to invoke the functions separately.
*/
static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
syscall_exit_to_user_mode_work(regs);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
instrumentation_end();
exit_to_user_mode();
}
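/*
 * Illustrative sketch (not part of the patch): an architecture with a
 * compelling reason to split the exit path would follow the documented
 * step order itself instead of calling syscall_exit_to_user_mode():
 */
	instrumentation_begin();
	syscall_exit_to_user_mode_work(regs);		/* step 1 */
	local_irq_disable_exit_to_user();
	syscall_exit_to_user_mode_prepare(regs);	/* step 2 */
	instrumentation_end();
	exit_to_user_mode();				/* step 3 */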
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index f0cf2714ec52..262e24d83313 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -201,9 +201,9 @@ struct handle_to_path_ctx {
* @commit_metadata: commit metadata changes to stable storage
*
* See Documentation/filesystems/nfs/exporting.rst for details on how to use
- * this interface correctly.
+ * this interface correctly and the definition of the flags.
*
- * encode_fh:
+ * @encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
* file referred to by the &struct dentry @de. If @flag has CONNECTABLE bit
@@ -215,7 +215,7 @@ struct handle_to_path_ctx {
* greater than @max_len*4 bytes). On error @max_len contains the minimum
* size(in 4 byte unit) needed to encode the file handle.
*
- * fh_to_dentry:
+ * @fh_to_dentry:
* @fh_to_dentry is given a &struct super_block (@sb) and a file handle
* fragment (@fh, @fh_len). It should return a &struct dentry which refers
* to the same file that the file handle fragment refers to. If it cannot,
@@ -227,31 +227,44 @@ struct handle_to_path_ctx {
* created with d_alloc_root. The caller can then find any other extant
* dentries by following the d_alias links.
*
- * fh_to_parent:
+ * @fh_to_parent:
* Same as @fh_to_dentry, except that it returns a pointer to the parent
* dentry if it was encoded into the filehandle fragment by @encode_fh.
*
- * get_name:
+ * @get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX + 1 sized
 * buffer). get_name() should return %0 on success, or a negative error
 * code on error. @get_name will be called without @parent->i_rwsem held.
*
- * get_parent:
+ * @get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
- * permission:
+ * @permission:
* Allow filesystems to specify a custom permission function.
*
- * open:
+ * @open:
* Allow filesystems to specify a custom open function.
*
- * commit_metadata:
+ * @commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
+ * @get_uuid:
+ * Get a filesystem unique signature exposed to clients.
+ *
+ * @map_blocks:
+ * Map and, if necessary, allocate blocks for a layout.
+ *
+ * @commit_blocks:
+ * Commit blocks in a layout once the client is done with them.
+ *
+ * @flags:
+ * Allows the filesystem to communicate to nfsd that it may need to be
+ * handled differently from other exported filesystems.
+ *
* Locking rules:
* get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index 54b824c05299..d2c9740e26a8 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -49,6 +49,7 @@ struct lease_manager_operations {
int (*lm_change)(struct file_lease *, int, struct list_head *);
void (*lm_setup)(struct file_lease *, void **);
bool (*lm_breaker_owns_lease)(struct file_lease *);
+ int (*lm_open_conflict)(struct file *, int);
};
struct lock_manager {
@@ -241,7 +242,14 @@ bool locks_owner_has_blockers(struct file_lock_context *flctx,
static inline struct file_lock_context *
locks_inode_context(const struct inode *inode)
{
- return smp_load_acquire(&inode->i_flctx);
+ /*
+ * Paired with smp_store_release in locks_get_lock_context().
+ *
+ * Ensures ->i_flctx will be visible if we spotted the flag.
+ */
+ if (likely(!(smp_load_acquire(&inode->i_opflags) & IOP_FLCTX)))
+ return NULL;
+ return READ_ONCE(inode->i_flctx);
}
#else /* !CONFIG_FILE_LOCKING */
@@ -468,7 +476,7 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -487,7 +495,7 @@ static inline int break_deleg(struct inode *inode, unsigned int flags)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -532,8 +540,11 @@ static inline int break_deleg_wait(struct delegated_inode *di)
static inline int break_layout(struct inode *inode, bool wait)
{
+ struct file_lock_context *flctx;
+
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ flctx = locks_inode_context(inode);
+ if (flctx && !list_empty_careful(&flctx->flc_lease)) {
unsigned int flags = LEASE_BREAK_LAYOUT;
if (!wait)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fd54fed8f95f..4e1cb4f91f49 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1167,6 +1167,7 @@ bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
bool bpf_jit_supports_timed_may_goto(void);
+bool bpf_jit_supports_fsession(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
u64 arch_bpf_timed_may_goto(void);
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index a55ca771286b..5747bd191bf1 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -66,19 +66,33 @@ int qcom_scm_set_warm_boot_addr(void *entry);
void qcom_scm_cpu_power_down(u32 flags);
int qcom_scm_set_remote_state(u32 state, u32 id);
-struct qcom_scm_pas_metadata {
+struct qcom_scm_pas_context {
+ struct device *dev;
+ u32 pas_id;
+ phys_addr_t mem_phys;
+ size_t mem_size;
void *ptr;
dma_addr_t phys;
ssize_t size;
+ bool use_tzmem;
};
-int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
- struct qcom_scm_pas_metadata *ctx);
-void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
-int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
-int qcom_scm_pas_auth_and_reset(u32 peripheral);
-int qcom_scm_pas_shutdown(u32 peripheral);
-bool qcom_scm_pas_supported(u32 peripheral);
+struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev,
+ u32 pas_id,
+ phys_addr_t mem_phys,
+ size_t mem_size);
+int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size,
+ struct qcom_scm_pas_context *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx);
+int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 pas_id);
+int qcom_scm_pas_shutdown(u32 pas_id);
+bool qcom_scm_pas_supported(u32 pas_id);
+struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx,
+ void *input_rt, size_t input_rt_size,
+ size_t *output_rt_size);
+
+int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
diff --git a/include/linux/firmware/xlnx-zynqmp-crypto.h b/include/linux/firmware/xlnx-zynqmp-crypto.h
new file mode 100644
index 000000000000..56595ab37c43
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-crypto.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for XilSECURE APIs.
+ *
+ * Copyright (C) 2014-2022 Xilinx, Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+#define __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+
+/**
+ * struct xlnx_feature - Feature data
+ * @family: Family code of platform
+ * @subfamily: Subfamily code of platform
+ * @feature_id: Feature id of module
+ * @data: Collection of all supported platform data
+ */
+struct xlnx_feature {
+ u32 family;
+ u32 feature_id;
+ void *data;
+};
+
+/* XilSECURE API commands: module id + api id */
+#define XSECURE_API_AES_INIT 0x509
+#define XSECURE_API_AES_OP_INIT 0x50a
+#define XSECURE_API_AES_UPDATE_AAD 0x50b
+#define XSECURE_API_AES_ENCRYPT_UPDATE 0x50c
+#define XSECURE_API_AES_ENCRYPT_FINAL 0x50d
+#define XSECURE_API_AES_DECRYPT_UPDATE 0x50e
+#define XSECURE_API_AES_DECRYPT_FINAL 0x50f
+#define XSECURE_API_AES_KEY_ZERO 0x510
+#define XSECURE_API_AES_WRITE_KEY 0x511
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map);
+int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr);
+int versal_pm_aes_key_zero(const u32 keysrc);
+int versal_pm_aes_op_init(const u64 hw_req);
+int versal_pm_aes_update_aad(const u64 aad_addr, const u32 aad_len);
+int versal_pm_aes_enc_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_final(const u64 gcm_addr);
+int versal_pm_aes_enc_final(const u64 gcm_addr);
+int versal_pm_aes_init(void);
+
+#else
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_key_zero(const u32 keysrc)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_op_init(const u64 hw_req)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_update_aad(const u64 aad_addr,
+ const u32 aad_len)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_init(void)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__ */
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 15fdbd089bbf..d70dcd462b44 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
+#include <linux/firmware/xlnx-zynqmp-crypto.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -589,9 +590,7 @@ int zynqmp_pm_release_node(const u32 node);
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
-int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_efuse_access(const u64 address, u32 *out);
-int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_fpga_get_config_status(u32 *value);
@@ -772,22 +771,11 @@ static inline int zynqmp_pm_set_requirement(const u32 node,
return -ENODEV;
}
-static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
{
return -ENODEV;
}
-static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
- const u32 flags)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index b3b53f8c1b28..171982e53c9a 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
-#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/const.h>
#include <linux/limits.h>
@@ -10,10 +9,9 @@
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
-#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
-#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
-#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
- FIELD_PREP(GENMASK(7, 1), func))
+#define FORTIFY_REASON_DIR(r) ((r) & 1)
+#define FORTIFY_REASON_FUNC(r) ((r) >> 1)
+#define FORTIFY_REASON(func, write) ((func) << 1 | (write))
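/*
 * Illustrative sketch (not part of the patch): FORTIFY_REASON() packs a
 * function id and a read/write bit into one value that the two accessors
 * unpack again, e.g. func 3 with the write bit set packs to 0b111:
 */
static_assert(FORTIFY_REASON(3, 1) == 7);
static_assert(FORTIFY_REASON_DIR(FORTIFY_REASON(3, 1)) == 1);
static_assert(FORTIFY_REASON_FUNC(FORTIFY_REASON(3, 1)) == 3);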
/* Overridden by KUnit tests. */
#ifndef fortify_panic
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f5c9cf28c4dc..2e4d1e8b0e71 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -631,6 +631,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_MGTIME 0x0020
#define IOP_CACHED_LINK 0x0040
#define IOP_FASTPERM_MAY_EXEC 0x0080
+#define IOP_FLCTX 0x0100
/*
* Inode state bits. Protected by inode->i_lock
@@ -1717,6 +1718,13 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
struct timespec64 simple_inode_init_ts(struct inode *inode);
+static inline int inode_time_dirty_flag(struct inode *inode)
+{
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
+ return I_DIRTY_TIME;
+ return I_DIRTY_SYNC;
+}
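/*
 * Illustrative sketch (not part of the patch): a filesystem marking an inode
 * dirty after a timestamp update, honouring lazytime. The surrounding code
 * is hypothetical.
 */
	__mark_inode_dirty(inode, inode_time_dirty_flag(inode));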
+
/*
* Snapshotting support.
*/
@@ -1855,6 +1863,8 @@ struct dir_context {
* INT_MAX unlimited
*/
int count;
+ /* @actor supports these flags in d_type high bits */
+ unsigned int dt_flags_mask;
};
/* If OR-ed with d_type, pending signals are not checked */
@@ -1983,6 +1993,11 @@ int wrap_directory_iterator(struct file *, struct dir_context *,
static int shared_##x(struct file *file , struct dir_context *ctx) \
{ return wrap_directory_iterator(file, ctx, x); }
+enum fs_update_time {
+ FS_UPD_ATIME,
+ FS_UPD_CMTIME,
+};
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
@@ -2010,7 +2025,9 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, int);
+ int (*update_time)(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+ void (*sync_lazytime)(struct inode *inode);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
@@ -2237,16 +2254,8 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-enum file_time_flags {
- S_ATIME = 1,
- S_MTIME = 2,
- S_CTIME = 4,
- S_VERSION = 8,
-};
-
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
-int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
@@ -2274,8 +2283,6 @@ struct file_system_type {
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
- struct dentry *(*mount) (struct file_system_type *, int,
- const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -2399,8 +2406,10 @@ static inline void super_set_sysfs_name_generic(struct super_block *sb, const ch
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
void iput_not_last(struct inode *);
-int inode_update_timestamps(struct inode *inode, int flags);
-int generic_update_time(struct inode *, int);
+int inode_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+int generic_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2409,14 +2418,19 @@ extern struct kobject *fs_kobj;
/* fs/open.c */
struct audit_names;
-struct filename {
+
+struct __filename_head {
const char *name; /* pointer to actual string */
- const __user char *uptr; /* original userland pointer */
- atomic_t refcnt;
+ int refcnt;
struct audit_names *aname;
- const char iname[];
+};
+#define EMBEDDED_NAME_MAX (192 - sizeof(struct __filename_head))
+struct filename {
+ struct __filename_head;
+ const char iname[EMBEDDED_NAME_MAX];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static_assert(sizeof(struct filename) % 64 == 0);
static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
@@ -2457,7 +2471,7 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred);
-struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+struct file *dentry_create(struct path *path, int flags, umode_t mode,
const struct cred *cred);
const struct path *backing_file_user_path(const struct file *f);
@@ -2511,11 +2525,23 @@ static inline struct filename *getname_maybe_null(const char __user *name, int f
extern void putname(struct filename *name);
DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
-static inline struct filename *refname(struct filename *name)
-{
- atomic_inc(&name->refcnt);
- return name;
-}
+struct delayed_filename {
+ struct filename *__incomplete_filename; // don't touch
+};
+#define INIT_DELAYED_FILENAME(ptr) \
+ ((void)(*(ptr) = (struct delayed_filename){}))
+int delayed_getname(struct delayed_filename *, const char __user *);
+int delayed_getname_uflags(struct delayed_filename *v, const char __user *, int);
+void dismiss_delayed_filename(struct delayed_filename *);
+int putname_to_delayed(struct delayed_filename *, struct filename *);
+struct filename *complete_getname(struct delayed_filename *);
+
+DEFINE_CLASS(filename, struct filename *, putname(_T), getname(p), const char __user *p)
+EXTEND_CLASS(filename, _kernel, getname_kernel(p), const char *p)
+EXTEND_CLASS(filename, _flags, getname_flags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _uflags, getname_uflags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _maybe_null, getname_maybe_null(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _complete_delayed, complete_getname(p), struct delayed_filename *p)
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
@@ -2534,10 +2560,8 @@ static inline int finish_open_simple(struct file *file, int error)
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
-extern struct kmem_cache *names_cachep;
-
-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname() kmalloc(PATH_MAX, GFP_KERNEL)
+#define __putname(name) kfree(name)
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
@@ -2657,6 +2681,11 @@ static inline int path_permission(const struct path *path, int mask)
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode);
+int may_delete_dentry(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *victim, bool isdir);
+int may_create_dentry(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *child);
+
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
@@ -3217,7 +3246,6 @@ extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
-extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
@@ -3524,7 +3552,9 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
+ unsigned int dt_mask = S_DT_MASK | ctx->dt_flags_mask;
+
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type & dt_mask);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
index 6bd3009e09b3..fa7638b81246 100644
--- a/include/linux/fs/super_types.h
+++ b/include/linux/fs/super_types.h
@@ -35,6 +35,7 @@ struct user_namespace;
struct workqueue_struct;
struct writeback_control;
struct xattr_handler;
+struct fserror_event;
extern struct super_block *blockdev_superblock;
@@ -96,7 +97,6 @@ struct super_operations {
const void *owner);
int (*unfreeze_fs)(struct super_block *sb);
int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
- int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin)(struct super_block *sb);
int (*show_options)(struct seq_file *seq, struct dentry *dentry);
@@ -124,6 +124,9 @@ struct super_operations {
*/
int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
void (*shutdown)(struct super_block *sb);
+
+ /* Report a filesystem error */
+ void (*report_error)(const struct fserror_event *event);
};
struct super_block {
@@ -268,6 +271,9 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
long s_min_writeback_pages;
+
+ /* number of fserrors that are being sent to fsnotify/filesystems */
+ refcount_t s_pending_errors;
} __randomize_layout;
/*
diff --git a/include/linux/fserror.h b/include/linux/fserror.h
new file mode 100644
index 000000000000..5e1ad78c346e
--- /dev/null
+++ b/include/linux/fserror.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef _LINUX_FSERROR_H__
+#define _LINUX_FSERROR_H__
+
+void fserror_mount(struct super_block *sb);
+void fserror_unmount(struct super_block *sb);
+
+enum fserror_type {
+ /* pagecache I/O failed */
+ FSERR_BUFFERED_READ,
+ FSERR_BUFFERED_WRITE,
+
+ /* direct I/O failed */
+ FSERR_DIRECTIO_READ,
+ FSERR_DIRECTIO_WRITE,
+
+ /* out of band media error reported */
+ FSERR_DATA_LOST,
+
+ /* filesystem metadata */
+ FSERR_METADATA,
+};
+
+struct fserror_event {
+ struct work_struct work;
+ struct super_block *sb;
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ enum fserror_type type;
+
+ /* negative error number */
+ int error;
+};
+
+void fserror_report(struct super_block *sb, struct inode *inode,
+ enum fserror_type type, loff_t pos, u64 len, int error,
+ gfp_t gfp);
+
+static inline void fserror_report_io(struct inode *inode,
+ enum fserror_type type, loff_t pos,
+ u64 len, int error, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, type, pos, len, error, gfp);
+}
+
+static inline void fserror_report_data_lost(struct inode *inode, loff_t pos,
+ u64 len, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_DATA_LOST, pos, len, -EIO,
+ gfp);
+}
+
+static inline void fserror_report_file_metadata(struct inode *inode, int error,
+ gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_metadata(struct super_block *sb, int error,
+ gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_shutdown(struct super_block *sb, gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, -ESHUTDOWN, gfp);
+}
+
+#endif /* _LINUX_FSERROR_H__ */
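/*
 * Illustrative sketch (not part of the patch): a write path reporting a
 * failed buffered write for the affected byte range. The surrounding code
 * is hypothetical.
 */
	if (ret < 0)
		fserror_report_io(inode, FSERR_BUFFERED_WRITE, pos, len, ret,
				  GFP_KERNEL);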
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 770f0dc993cc..705db0a6d995 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,6 +82,7 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_func_entry;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
@@ -359,7 +360,6 @@ enum {
FTRACE_OPS_FL_DIRECT = BIT(17),
FTRACE_OPS_FL_SUBOP = BIT(18),
FTRACE_OPS_FL_GRAPH = BIT(19),
- FTRACE_OPS_FL_JMP = BIT(20),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -403,9 +403,17 @@ enum ftrace_ops_cmd {
* Negative on failure. The return value is dependent on the
* callback.
*/
-typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, unsigned long ip, enum ftrace_ops_cmd cmd);
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_HASH_DEFAULT_BITS 10
+
+struct ftrace_hash *alloc_ftrace_hash(int size_bits);
+void free_ftrace_hash(struct ftrace_hash *hash);
+struct ftrace_func_entry *add_ftrace_hash_entry_direct(struct ftrace_hash *hash,
+ unsigned long ip, unsigned long direct);
+
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
struct ftrace_hash __rcu *notrace_hash;
@@ -535,6 +543,10 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock);
+
void ftrace_stub_direct_tramp(void);
#else
@@ -561,6 +573,21 @@ static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned l
return -ENODEV;
}
+static inline int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock)
+{
+ return -ENODEV;
+}
+
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
@@ -1167,7 +1194,7 @@ static inline void ftrace_init(void) { }
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
- unsigned long depth;
+ long depth; /* signed to check for less than zero */
} __packed;
/*
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
index 15627ceea9bc..386fa48c4a95 100644
--- a/include/linux/ftrace_regs.h
+++ b/include/linux/ftrace_regs.h
@@ -33,6 +33,31 @@ struct ftrace_regs;
#define ftrace_regs_get_frame_pointer(fregs) \
frame_pointer(&arch_ftrace_regs(fregs)->regs)
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs) { }
+
+#else
+
+/*
+ * ftrace_partial_regs_update - update the original ftrace_regs from regs
+ * @fregs: The ftrace_regs to update from @regs
+ * @regs: The partial regs from ftrace_partial_regs() that was updated
+ *
+ * Some architectures have the partial regs living in the ftrace_regs
+ * structure, whereas other architectures need to make a different copy
+ * of the @regs. If a partial @regs is retrieved by ftrace_partial_regs() and
+ * if the code using @regs updates a field (like the instruction pointer or
+ * stack pointer) it may need to propagate that change to the original @fregs
+ * it retrieved the partial @regs from. Use this function to guarantee that
+ * update happens.
+ */
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ ftrace_regs_set_instruction_pointer(fregs, instruction_pointer(regs));
+ ftrace_regs_set_return_value(fregs, regs_return_value(regs));
+}
+
#endif /* HAVE_ARCH_FTRACE_REGS */
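/*
 * Illustrative sketch (not part of the patch): a caller that hands a partial
 * pt_regs copy to a handler and then propagates the handler's changes back
 * to the original ftrace_regs. 'handler' and 'local_regs' are hypothetical.
 */
	struct pt_regs *regs = ftrace_partial_regs(fregs, &local_regs);

	handler(regs);			/* may update ip or the return value */
	ftrace_partial_regs_update(fregs, regs);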
/* This can be overridden by the architectures */
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
deleted file mode 100644
index c304dcdb4eac..000000000000
--- a/include/linux/getcpu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GETCPU_H
-#define _LINUX_GETCPU_H 1
-
-/* Cache for getcpu() to speed it up. Results might be a short time
- out of date, but will be faster.
-
- User programs should not refer to the contents of this structure.
- I repeat they should not refer to it. If they do they will break
- in future kernels.
-
- It is only a private cache for vgetcpu(). It will change in future kernels.
- The user program must store this information per thread (__thread)
- If you want 100% accurate information pass NULL instead. */
-struct getcpu_cache {
- unsigned long blob[128 / sizeof(long)];
-};
-
-#endif
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index ca1ec437a3ca..51a6dc2b97e9 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -447,12 +447,16 @@ struct hisi_qp_ops {
int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};
+struct instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
+};
+
struct hisi_qp {
u32 qp_id;
u16 sq_depth;
u16 cq_depth;
u8 alg_type;
- u8 req_type;
struct qm_dma qdma;
void *sqe;
@@ -462,7 +466,6 @@ struct hisi_qp {
struct hisi_qp_status qp_status;
struct hisi_qp_ops *hw_ops;
- void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
void (*event_cb)(struct hisi_qp *qp);
@@ -471,6 +474,11 @@ struct hisi_qp {
bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
+
+ u32 ref_count;
+ spinlock_t qp_lock;
+ struct instance_backlog backlog;
+ const void **msg;
};
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
@@ -563,6 +571,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev);
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op);
+int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -575,7 +584,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
- u8 alg_type, int node, struct hisi_qp **qps);
+ u8 *alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2cf1bf65b225..74adbd4e7003 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -2,7 +2,7 @@
/*
* hrtimers - High-resolution kernel timers
*
- * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
*
* data type definitions, declarations, prototypes
@@ -112,12 +112,6 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
-static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
-{
- timer->node.expires = tv64;
- timer->_softexpires = tv64;
-}
-
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = ktime_add_safe(timer->node.expires, time);
@@ -140,15 +134,6 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
return timer->_softexpires;
}
-static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
-{
- return timer->node.expires;
-}
-static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
-{
- return timer->_softexpires;
-}
-
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->node.expires);
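Since ktime_t is a plain s64, the removed *_tv64 helpers were aliases of the ktime_t accessors that remain in this header. A conversion sketch for remaining users (the timer pointer is a placeholder):

ktime_t expires = hrtimer_get_expires(timer);	/* was hrtimer_get_expires_tv64() */
ktime_t soft = hrtimer_get_softexpires(timer);	/* was hrtimer_get_softexpires_tv64() */

hrtimer_set_expires(timer, expires);		/* was hrtimer_set_expires_tv64() */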
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index aa49ffa130e5..02b010df6570 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -6,26 +6,6 @@
#include <linux/timerqueue.h>
#include <linux/seqlock.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
-
-#else
-
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-
-#endif
-
#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align ____cacheline_aligned
#else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 019a1c5281e4..e51b8ef0cebd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -240,8 +240,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
@@ -300,13 +301,17 @@ static inline struct address_space *hugetlb_folio_mapping_lock_write(
return NULL;
}
-static inline int huge_pmd_unshare(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline int huge_pmd_unshare(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
return 0;
}
+static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
+ struct vm_area_struct *vma)
+{
+}
+
static inline void adjust_range_if_pmd_sharing_possible(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
@@ -1326,7 +1331,7 @@ static inline __init void hugetlb_cma_reserve(int order)
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
- return page_count(virt_to_page(pte)) > 1;
+ return ptdesc_pmd_is_shared(virt_to_ptdesc(pte));
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
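With huge_pmd_unshare() now taking an mmu_gather, the TLB flush appears to be deferred into the gather and completed by the new huge_pmd_unshare_flush(). A rough caller sketch under that assumption (vma, addr and ptep are placeholders):

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, vma->vm_mm);
if (huge_pmd_unshare(&tlb, vma, addr, ptep)) {
	/* a shared PMD page table was unhooked; its flush is deferred */
}
huge_pmd_unshare_flush(&tlb, vma);	/* flush before the PMD page can be reused */
tlb_finish_mmu(&tlb);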
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index b424555753b1..b77bc55a4cf3 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -48,6 +49,7 @@ struct hwrng {
/* internal. */
struct list_head list;
struct kref ref;
+ struct work_struct cleanup_work;
struct completion cleanup_done;
struct completion dying;
};
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 9fcb6410a584..971d53349b6f 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -25,7 +25,7 @@
* @I3C_ERROR_M2: M2 error
*
* These are the standard error codes as defined by the I3C specification.
- * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * When -EIO is returned by the i3c_device_do_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
* &struct_i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
@@ -79,9 +79,6 @@ struct i3c_xfer {
enum i3c_error_code err;
};
-/* keep back compatible */
-#define i3c_priv_xfer i3c_xfer
-
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -308,15 +305,23 @@ static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
i3c_i2c_driver_unregister, \
__i2cdrv)
+#if IS_ENABLED(CONFIG_I3C)
int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
int nxfers, enum i3c_xfer_mode mode);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
+#else
+static inline int
+i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
+{
+ return -EOPNOTSUPP;
+}
-static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_xfer *xfers,
- int nxfers)
+static inline u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev)
{
- return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+ return 0;
}
+#endif
int i3c_device_do_setdasa(struct i3c_device *dev);
@@ -358,6 +363,5 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
-u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
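With the i3c_priv_xfer alias and the i3c_device_do_priv_xfers() wrapper gone, clients call i3c_device_do_xfers() directly and pick a transfer mode. A conversion sketch, assuming struct i3c_xfer keeps the former i3c_priv_xfer layout apart from the err field shown above:

struct i3c_xfer xfer = {
	.rnw = true,			/* read transfer (assumed field name) */
	.len = sizeof(buf),
	.data.in = buf,
};
int ret;

ret = i3c_device_do_xfers(i3cdev, &xfer, 1, I3C_SDR);
if (ret == -EIO)
	dev_err(i3cdev_to_dev(i3cdev), "xfer error %d\n", xfer.err);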
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 58d01ed4cce7..592b646f6134 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -462,6 +462,8 @@ struct i3c_bus {
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
* @set_speed: adjust I3C open drain mode timing.
+ * @set_dev_nack_retry: configure device NACK retry count for the master
+ * controller.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -491,6 +493,8 @@ struct i3c_master_controller_ops {
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
+ int (*set_dev_nack_retry)(struct i3c_master_controller *master,
+ unsigned long dev_nack_retry_cnt);
};
/**
@@ -505,6 +509,8 @@ struct i3c_master_controller_ops {
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
* @hotjoin: true if the master support hotjoin
+ * @rpm_allowed: true if Runtime PM allowed
+ * @rpm_ibi_allowed: true if IBI and Hot-Join allowed while runtime suspended
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
@@ -514,6 +520,7 @@ struct i3c_master_controller_ops {
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
* be done from a sleep-able context
+ * @dev_nack_retry_count: retry count used when a slave device NACKs
*
* A &struct i3c_master_controller has to be registered to the I3C subsystem
* through i3c_master_register(). None of &struct i3c_master_controller fields
@@ -528,12 +535,15 @@ struct i3c_master_controller {
unsigned int secondary : 1;
unsigned int init_done : 1;
unsigned int hotjoin: 1;
+ unsigned int rpm_allowed: 1;
+ unsigned int rpm_ibi_allowed: 1;
struct {
struct list_head i3c;
struct list_head i2c;
} boardinfo;
struct i3c_bus bus;
struct workqueue_struct *wq;
+ unsigned int dev_nack_retry_count;
};
/**
@@ -595,6 +605,7 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+int i3c_master_do_daa_ext(struct i3c_master_controller *master, bool rstdaa);
struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
size_t len, bool force_bounce,
enum dma_data_direction dir);
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 4247497f3f8b..b87841a355f8 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -14,6 +14,7 @@
* @mlock: lock used to prevent simultaneous device state changes
* @mlock_key: lockdep class for iio_dev lock
* @info_exist_lock: lock to prevent use during removal
+ * @info_exist_key: lockdep class for info_exist lock
* @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
* @attached_buffers: array of buffers statically attached by the driver
@@ -47,6 +48,7 @@ struct iio_dev_opaque {
struct mutex mlock;
struct lock_class_key mlock_key;
struct mutex info_exist_lock;
+ struct lock_class_key info_exist_key;
bool trig_readonly;
struct iio_event_interface *event_interface;
struct iio_buffer **attached_buffers;
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
index 92045d18cbfc..28776ee28d8e 100644
--- a/include/linux/init_syscalls.h
+++ b/include/linux/init_syscalls.h
@@ -17,3 +17,4 @@ int __init init_mkdir(const char *pathname, umode_t mode);
int __init init_rmdir(const char *pathname);
int __init init_utimes(char *filename, struct timespec64 *ts);
int __init init_dup(struct file *file);
+int __init init_pivot_root(const char *new_root, const char *put_old);
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index f1a1f4c92ded..7e5d26c8136f 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -3,8 +3,6 @@
#ifndef __LINUX_INITRD_H
#define __LINUX_INITRD_H
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
/* starting block # of image */
extern int rd_image_start;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a..6cd26ffb0505 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -181,9 +181,8 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
extern int __must_check
-__request_percpu_irq(unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *devname,
- const cpumask_t *affinity, void __percpu *percpu_dev_id);
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler, const char *devname,
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -193,17 +192,8 @@ static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
- return __request_percpu_irq(irq, handler, 0,
- devname, NULL, percpu_dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
- const char *devname, const cpumask_t *affinity,
- void __percpu *percpu_dev_id)
-{
- return __request_percpu_irq(irq, handler, 0,
- devname, affinity, percpu_dev_id);
+ return request_percpu_irq_affinity(irq, handler, devname,
+ NULL, percpu_dev_id);
}
extern int __must_check
@@ -228,7 +218,7 @@ static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags | IRQF_COND_ONESHOT,
devname, dev_id);
}
@@ -871,12 +861,6 @@ static inline void init_irq_proc(void)
}
#endif
-#ifdef CONFIG_IRQ_TIMINGS
-void irq_timings_enable(void);
-void irq_timings_disable(void);
-u64 irq_timings_next_event(u64 now);
-#endif
-
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
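request_percpu_irq_affinity() is now the exported primitive (without a flags argument) and request_percpu_irq() becomes a thin wrapper passing a NULL affinity mask, as shown above. A driver that wants to restrict the per-CPU IRQ to a subset of CPUs would do roughly the following (handler, name, mask and percpu cookie are placeholders):

err = request_percpu_irq_affinity(irq, my_percpu_handler, "my-evt",
				  my_affinity_mask, &my_percpu_dev_id);
if (err)
	return err;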
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 85fe4e6b275c..d1aa4edfc2a5 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -12,6 +12,7 @@ void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
bool io_is_uring_fops(struct file *file);
+int __io_uring_fork(struct task_struct *tsk);
static inline void io_uring_files_cancel(void)
{
@@ -25,9 +26,16 @@ static inline void io_uring_task_cancel(void)
}
static inline void io_uring_free(struct task_struct *tsk)
{
- if (tsk->io_uring)
+ if (tsk->io_uring || tsk->io_uring_restrict)
__io_uring_free(tsk);
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ if (tsk->io_uring_restrict)
+ return __io_uring_fork(tsk);
+
+ return 0;
+}
#else
static inline void io_uring_task_cancel(void)
{
@@ -46,6 +54,10 @@ static inline bool io_is_uring_fops(struct file *file)
{
return false;
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index a3e8ddc9b380..3e4a82a6f817 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -219,12 +219,26 @@ struct io_rings {
struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
+struct io_bpf_filter;
+struct io_bpf_filters {
+ refcount_t refs; /* ref for ->bpf_filters */
+ spinlock_t lock; /* protects ->bpf_filters modifications */
+ struct io_bpf_filter __rcu **filters;
+ struct rcu_head rcu_head;
+};
+
struct io_restriction {
DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ struct io_bpf_filters *bpf_filters;
+ /* ->bpf_filters needs COW on modification */
+ bool bpf_filters_cow;
u8 sqe_flags_allowed;
u8 sqe_flags_required;
- bool registered;
+ /* IORING_OP_* restrictions exist */
+ bool op_registered;
+ /* IORING_REGISTER_* restrictions exist */
+ bool reg_registered;
};
struct io_submit_link {
@@ -259,7 +273,8 @@ struct io_ring_ctx {
struct {
unsigned int flags;
unsigned int drain_next: 1;
- unsigned int restricted: 1;
+ unsigned int op_restricted: 1;
+ unsigned int reg_restricted: 1;
unsigned int off_timeout_used: 1;
unsigned int drain_active: 1;
unsigned int has_evfd: 1;
@@ -274,6 +289,8 @@ struct io_ring_ctx {
struct task_struct *submitter_task;
struct io_rings *rings;
+ /* cache of ->restrictions.bpf_filters->filters */
+ struct io_bpf_filter __rcu **bpf_filters;
struct percpu_ref refs;
clockid_t clockid;
@@ -316,7 +333,7 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
bool poll_multi_queue;
- struct io_wq_work_list iopoll_list;
+ struct list_head iopoll_list;
struct io_file_table file_table;
struct io_rsrc_data buf_table;
@@ -444,6 +461,9 @@ struct io_ring_ctx {
struct list_head defer_list;
unsigned nr_drained;
+ /* protected by ->completion_lock */
+ unsigned nr_req_allocated;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
@@ -456,10 +476,6 @@ struct io_ring_ctx {
DECLARE_HASHTABLE(napi_ht, 4);
#endif
- /* protected by ->completion_lock */
- unsigned evfd_last_cq_tail;
- unsigned nr_req_allocated;
-
/*
* Protection for resize vs mmap races - both the mmap and resize
* side will need to grab this lock, to prevent either side from
@@ -714,15 +730,21 @@ struct io_kiocb {
atomic_t refs;
bool cancel_seq_set;
- struct io_task_work io_task_work;
+
+ union {
+ struct io_task_work io_task_work;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ };
+
union {
/*
* for polled requests, i.e. IORING_OP_POLL_ADD and async armed
* poll
*/
struct hlist_node hash_node;
- /* For IOPOLL setup queues, with hybrid polling */
- u64 iopoll_start;
+ /* IOPOLL completion handling */
+ struct list_head iopoll_node;
/* for private io_kiocb freeing */
struct rcu_head rcu_head;
};
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 520e967cb501..99b7209dabd7 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -88,6 +88,9 @@ struct vm_fault;
/*
* Flags set by the core iomap code during operations:
*
+ * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
+ * for this operation, set by iomap_fill_dirty_folios().
+ *
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
*
@@ -95,6 +98,7 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
+#define IOMAP_F_FOLIO_BATCH (1U << 13)
#define IOMAP_F_SIZE_CHANGED (1U << 14)
#define IOMAP_F_STALE (1U << 15)
@@ -341,9 +345,9 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);
void iomap_read_folio(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
void iomap_readahead(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
@@ -352,8 +356,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops);
-loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
- loff_t length);
+unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
+ loff_t end, unsigned int *iomap_flags);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);
@@ -562,6 +566,15 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+/*
+ * Bounce buffer instead of using zero copy access.
+ *
+ * This is needed if the device needs stable data to checksum or generate
+ * parity. The file system must hook into the I/O submission and offload
+ * completions to user context for reads when this is set.
+ */
+#define IOMAP_DIO_BOUNCE (1 << 4)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);
@@ -595,7 +608,7 @@ static inline void iomap_bio_read_folio(struct folio *folio,
.cur_folio = folio,
};
- iomap_read_folio(ops, &ctx);
+ iomap_read_folio(ops, &ctx, NULL);
}
static inline void iomap_bio_readahead(struct readahead_control *rac,
@@ -606,7 +619,7 @@ static inline void iomap_bio_readahead(struct readahead_control *rac,
.rac = rac,
};
- iomap_readahead(ops, &ctx);
+ iomap_readahead(ops, &ctx, NULL);
}
#endif /* CONFIG_BLOCK */
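iomap_read_folio() and iomap_readahead() gain a private pointer; the bio-based inline wrappers above simply pass NULL. For a filesystem that needs no per-operation data the wiring stays a one-liner (myfs_iomap_ops is a hypothetical ops table):

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	iomap_bio_read_folio(folio, &myfs_iomap_ops);
	return 0;
}

static void myfs_readahead(struct readahead_control *rac)
{
	iomap_bio_readahead(rac, &myfs_iomap_ops);
}

Filesystems that do need the new argument would build their own iomap_read_folio_ctx and call iomap_read_folio()/iomap_readahead() directly with a non-NULL private pointer.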
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4a9f1d7b08c3..951acbdb9f84 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -459,6 +459,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* checks against the supplied affinity mask are not
* required. This is used for CPU hotplug where the
* target CPU is not yet set in the cpu_online_mask.
+ * @irq_pre_redirect: Optional function to be invoked before redirecting
+ * an interrupt via irq_work. Called only on CONFIG_SMP.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -503,6 +505,7 @@ struct irq_chip {
void (*irq_eoi)(struct irq_data *data);
int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+ void (*irq_pre_redirect)(struct irq_data *data);
int (*irq_retrigger)(struct irq_data *data);
int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
@@ -595,9 +598,6 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
-struct irqaction;
-extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -658,7 +658,7 @@ extern void handle_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
-extern int irq_chip_pm_put(struct irq_data *data);
+extern void irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
@@ -687,6 +687,13 @@ extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
+#ifdef CONFIG_SMP
+void irq_chip_pre_redirect_parent(struct irq_data *data);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force);
#endif
/* Disable or mask interrupts during a kernel kexec */
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
index 68ddcdb1cec5..3da1ad80fc9d 100644
--- a/include/linux/irqchip/arm-gic-v5.h
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -265,6 +265,12 @@
#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+#define GICV5_GSI_IC_TYPE GENMASK(31, 29)
+#define GICV5_GSI_IWB_TYPE 0x7
+
+#define GICV5_GSI_IWB_FRAME_ID GENMASK(28, 16)
+#define GICV5_GSI_IWB_WIRE GENMASK(15, 0)
+
/*
* Global Data structures and functions
*/
@@ -344,6 +350,7 @@ void __init gicv5_init_lpi_domain(void);
void __init gicv5_free_lpi_domain(void);
int gicv5_irs_of_probe(struct device_node *parent);
+int gicv5_irs_acpi_probe(void);
void gicv5_irs_remove(void);
int gicv5_irs_enable(void);
void gicv5_irs_its_probe(void);
@@ -391,4 +398,5 @@ int gicv5_alloc_lpi(void);
void gicv5_free_lpi(u32 lpi);
void __init gicv5_its_of_probe(struct device_node *parent);
+void __init gicv5_its_acpi_probe(void);
#endif
diff --git a/include/linux/irqchip/irq-renesas-rzt2h.h b/include/linux/irqchip/irq-renesas-rzt2h.h
new file mode 100644
index 000000000000..853fd5ee0b22
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzt2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/T2H Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZT2H
+#define __LINUX_IRQ_RENESAS_RZT2H
+
+#include <linux/platform_device.h>
+
+#define RZT2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZT2H_ICU
+void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZT2H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 17902861de76..dae9a9b93665 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,9 +2,10 @@
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
-#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* Core internal functions to deal with irq descriptors
@@ -30,6 +31,17 @@ struct irqstat {
};
/**
+ * struct irq_redirect - interrupt redirection metadata
+ * @work: irq_work item for handler execution on a different CPU
+ * @target_cpu: CPU to run irq handler on in case the current CPU is not part
+ * of the irq affinity mask
+ */
+struct irq_redirect {
+ struct irq_work work;
+ unsigned int target_cpu;
+};
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
@@ -46,6 +58,7 @@ struct irqstat {
* @threads_handled: stats field for deferred spurious detection of threaded handlers
* @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
+ * @redirect: Facility for redirecting interrupts via irq_work
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
@@ -83,6 +96,7 @@ struct irq_desc {
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
+ struct irq_redirect redirect;
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -185,6 +199,7 @@ int generic_handle_irq_safe(unsigned int irq);
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
+bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 62f81bbeb490..73c25d40846c 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -257,7 +257,8 @@ static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
- const char *name, phys_addr_t *pa);
+ const char *name, phys_addr_t *pa,
+ struct fwnode_handle *parent);
enum {
IRQCHIP_FWNODE_REAL,
@@ -267,18 +268,39 @@ enum {
static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_parented_fwnode(const char *name,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
- NULL);
+ NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_id_parented_fwnode(const char *name, int id,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
+ NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, NULL);
+}
+
+static inline struct fwnode_handle *irq_domain_alloc_parented_fwnode(phys_addr_t *pa,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, parent);
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
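__irq_domain_alloc_fwnode() and its wrappers grow a parent argument, presumably so a software-constructed fwnode can be parented under the device it belongs to. A sketch of the parented variant (dev is a placeholder; the parenting intent is an assumption):

struct fwnode_handle *fwnode;

fwnode = irq_domain_alloc_named_parented_fwnode("my-msi", dev_fwnode(dev));
if (!fwnode)
	return -ENOMEM;
/* ... create the irq_domain on top of fwnode ... */
irq_domain_free_fwnode(fwnode);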
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f5eaf76198f3..a53a00d36228 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1815,7 +1815,4 @@ static inline int jbd2_handle_buffer_credits(handle_t *handle)
#endif /* __KERNEL__ */
-#define EFSBADCRC EBADMSG /* Bad CRC detected */
-#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
-
#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9c6ac4b62eb9..338a1921a50a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}
+void __kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size);
+
+static __always_inline void kasan_vrealloc(const void *start,
+ unsigned long old_size,
+ unsigned long new_size)
+{
+ if (kasan_enabled())
+ __kasan_vrealloc(start, old_size, new_size);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -670,6 +681,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
kasan_vmalloc_flags_t flags)
{ }
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size) { }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
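kasan_vrealloc() gives vmalloc resizing paths a single hook to bring the shadow in line with a new logical size. A hedged sketch of how such a path might use it (the surrounding resize logic is a placeholder; the grow/shrink semantics are assumed):

static void my_vrealloc_fixup(void *p, unsigned long old_size,
			      unsigned long new_size)
{
	/*
	 * Unpoison the grown tail or re-poison the shrunk tail so that
	 * accesses beyond new_size are caught again.
	 */
	kasan_vrealloc(p, old_size, new_size);
}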
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 0ad1ddbb8b99..e5822f6e7f27 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -211,6 +211,7 @@ struct kmem_obj_info;
* __kfence_obj_info() - fill kmem_obj_info struct
* @kpp: kmem_obj_info to be filled
* @object: the object
+ * @slab: the slab
*
* Return:
* * false - not a KFENCE object
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 88e82ab1367c..9bc6abe57572 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -81,6 +81,7 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *mutex)
+ __cond_acquires(true, mutex)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
@@ -102,6 +103,7 @@ static inline int kref_put_mutex(struct kref *kref,
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
+ __cond_acquires(true, lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
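The __cond_acquires(true, ...) annotations encode the long-standing calling convention: when kref_put_mutex()/kref_put_lock() return non-zero, release() has run and the lock is still held, so the caller must drop it. A minimal caller sketch (obj, obj_release and obj_mutex are placeholders):

if (kref_put_mutex(&obj->refcount, obj_release, &obj_mutex))
	mutex_unlock(&obj_mutex);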
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8d27403888ce..c92c1149ee6e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -100,6 +100,7 @@ void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __noreturn;
void kthread_complete_and_exit(struct completion *, long) __noreturn;
+int kthreads_update_housekeeping(void);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 383ed9985802..f247e564602f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -3,7 +3,7 @@
*
* ktime_t - nanosecond-resolution time format.
*
- * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
*
* data type definitions, declarations, prototypes and macros.
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..df9eebe6afca 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -144,11 +144,13 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ __acquires(__bitlock(0, b))
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ __releases(__bitlock(0, b))
{
__bit_spin_unlock(0, (unsigned long *)b);
}
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 772919e8096a..ba9e3988c07c 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -175,6 +175,9 @@ int klp_enable_patch(struct klp_patch *);
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
+void *klp_find_section_by_name(const struct module *mod, const char *name,
+ size_t *sec_size);
+
void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
+#define local_lock(lock) __local_lock(__this_cpu_local_lock(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock) __local_lock_irq(__this_cpu_local_lock(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(this_cpu_ptr(lock), flags)
+ __local_lock_irqsave(__this_cpu_local_lock(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock) __local_unlock(__this_cpu_local_lock(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock) __local_unlock_irq(__this_cpu_local_lock(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+ __local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
/**
* local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
* locking constrains it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock) __local_trylock(__this_cpu_local_lock(lock))
#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
@@ -81,27 +81,44 @@
* HARDIRQ context on PREEMPT_RT.
*/
#define local_trylock_irqsave(lock, flags) \
- __local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
- local_lock(_T),
- local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
- local_lock_irq(_T),
- local_unlock_irq(_T))
+ __local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+ local_lock(_T->lock),
+ local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+ local_lock_irq(_T->lock),
+ local_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
local_lock_irqsave(_T->lock, _T->flags),
local_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
#define local_lock_nested_bh(_lock) \
- __local_lock_nested_bh(this_cpu_ptr(_lock))
+ __local_lock_nested_bh(__this_cpu_local_lock(_lock))
#define local_unlock_nested_bh(_lock) \
- __local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
- local_lock_nested_bh(_T),
- local_unlock_nested_bh(_T))
+ __local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+ local_lock_nested_bh(_T->lock),
+ local_unlock_nested_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
#endif
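Switching the guards to DEFINE_LOCK_GUARD_1 does not change how they are used; a scoped section still looks like the sketch below (the per-CPU lock and counter are placeholders):

static DEFINE_PER_CPU(local_lock_t, stats_lock) = INIT_LOCAL_LOCK(stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_count);

static void stats_inc(void)
{
	guard(local_lock)(&stats_lock);
	__this_cpu_inc(stats_count);
}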
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8f82b4eb542f..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -4,25 +4,30 @@
#endif
#include <linux/percpu-defs.h>
+#include <linux/irqflags.h>
#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
+#include <asm/current.h>
#ifndef CONFIG_PREEMPT_RT
-typedef struct {
+context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -84,7 +89,10 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock) \
+do { \
+ __local_lock_init((local_lock_t *)lock); \
+} while (0)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -117,22 +125,25 @@ do { \
do { \
preempt_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
preempt_disable(); \
@@ -146,10 +157,10 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
local_irq_save(flags); \
@@ -163,7 +174,7 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
/* preemption or migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -186,18 +197,21 @@ do { \
#define __local_unlock(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
@@ -206,13 +220,20 @@ do { \
do { \
lockdep_assert_in_softirq(); \
local_lock_acquire((lock)); \
+ __acquire(lock); \
} while (0)
#define __local_unlock_nested_bh(lock) \
- local_lock_release((lock))
+ do { \
+ __release(lock); \
+ local_lock_release((lock)); \
+ } while (0)
#else /* !CONFIG_PREEMPT_RT */
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
/*
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
* critical section while staying preemptible.
@@ -267,7 +288,7 @@ do { \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, context_unsafe(({ \
int __locked; \
\
if (in_nmi() | in_hardirq()) { \
@@ -279,17 +300,40 @@ do { \
migrate_enable(); \
} \
__locked; \
- })
+ })))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
typecheck(unsigned long, flags); \
flags = 0; \
__local_trylock(lock); \
- })
+ }))
/* migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(__lock) \
(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
#endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base variable,
+ * and hide the fact we actually pass the per-CPU instance to lock/unlock
+ * functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index dd634103b014..621566345406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -282,16 +282,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
#define lockdep_assert_held(l) \
- lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+ do { lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_not_held(l) \
lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
#define lockdep_assert_held_write(l) \
- lockdep_assert(lockdep_is_held_type(l, 0))
+ do { lockdep_assert(lockdep_is_held_type(l, 0)); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_held_read(l) \
- lockdep_assert(lockdep_is_held_type(l, 1))
+ do { lockdep_assert(lockdep_is_held_type(l, 1)); __assume_shared_ctx_lock(l); } while (0)
#define lockdep_assert_held_once(l) \
lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
@@ -389,10 +389,10 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert(c) do { } while (0)
#define lockdep_assert_once(c) do { } while (0)
-#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held(l) __assume_ctx_lock(l)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) __assume_ctx_lock(l)
+#define lockdep_assert_held_read(l) __assume_shared_ctx_lock(l)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
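With the added __assume_ctx_lock()/__assume_shared_ctx_lock() hints, a lockdep assertion now also satisfies the compiler's context analysis, so functions that rely on a caller-held lock need no extra annotation. A small sketch (struct foo is a placeholder):

struct foo {
	spinlock_t lock;
	int count;
};

/* Caller must hold f->lock. */
static void foo_bump(struct foo *f)
{
	lockdep_assert_held(&f->lock);	/* runtime check + context-analysis assumption */
	f->count++;
}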
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 815d871fadfc..6ded24cdb4a8 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -49,9 +49,7 @@ static inline void lockref_init(struct lockref *lockref)
void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
-bool lockref_put_or_lock(struct lockref *lockref);
-#define lockref_put_or_lock(_lockref) \
- (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+bool lockref_put_or_lock(struct lockref *lockref) __cond_acquires(false, &lockref->lock);
void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
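lockref_put_or_lock() loses its __cond_lock() wrapper in favour of a direct __cond_acquires(false, ...) annotation; the calling convention is unchanged: a false return means the count was not dropped and the spinlock is now held. A caller sketch (struct foo is a placeholder):

static void foo_put(struct foo *f)
{
	if (lockref_put_or_lock(&f->ref))
		return;				/* count dropped, nothing else to do */
	/* last reference: f->ref.lock is held, tear the object down */
	list_del(&f->node);
	spin_unlock(&f->ref.lock);
	kfree(f);
}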
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index b92008641242..d48bf0ad26f4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -73,7 +73,7 @@ struct lsm_static_calls_table {
/**
* struct lsm_id - Identify a Linux Security Module.
- * @lsm: name of the LSM, must be approved by the LSM maintainers
+ * @name: name of the LSM, must be approved by the LSM maintainers
* @id: LSM ID number from uapi/linux/lsm.h
*
* Contains the information that identifies the LSM.
@@ -164,7 +164,7 @@ enum lsm_order {
* @initcall_core: LSM callback for core_initcall() setup, optional
* @initcall_subsys: LSM callback for subsys_initcall() setup, optional
* @initcall_fs: LSM callback for fs_initcall setup, optional
- * @nitcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
* @initcall_late: LSM callback for late_initcall() setup, optional
*/
struct lsm_info {
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e1555e06e7e5..07c1bfbdb8c4 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -70,14 +70,33 @@ struct cmdq_cb_data {
struct cmdq_pkt *pkt;
};
+struct cmdq_mbox_priv {
+ u8 shift_pa;
+ dma_addr_t mminfra_offset;
+};
+
struct cmdq_pkt {
void *va_base;
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
+ struct cmdq_mbox_priv priv; /* for generating instruction */
};
/**
+ * cmdq_get_mbox_priv() - get the private data of mailbox channel
+ * @chan: mailbox channel
+ * @priv: pointer to store the private data of mailbox channel
+ *
+ * While generating GCE instructions into the command buffer, the private
+ * data of the GCE hardware may need to be referenced, such as the shift
+ * bits of the physical address.
+ *
+ * This function should be called before generating the GCE instruction.
+ */
+void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv);
+
+/**
* cmdq_get_shift_pa() - get the shift bits of physical address
* @chan: mailbox channel
*
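Following the kernel-doc above, a client fetches the channel's private data once before building any instructions. How mminfra_offset combines with shift_pa below is an assumption for illustration (chan and pkt are placeholders):

struct cmdq_mbox_priv priv;
dma_addr_t hw_pa;

cmdq_get_mbox_priv(chan, &priv);
hw_pa = (pkt->pa_base + priv.mminfra_offset) >> priv.shift_pa;
/* ... encode hw_pa into the GCE instruction stream ... */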
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0651865a4564..67f154de10bc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -949,7 +949,11 @@ static inline void mod_memcg_page_state(struct page *page,
rcu_read_unlock();
}
+unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
+bool memcg_stat_item_valid(int idx);
+bool memcg_vm_event_item_valid(enum vm_event_item idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx);
@@ -1037,6 +1041,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return id;
}
+void mem_cgroup_flush_workqueue(void);
+
extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
@@ -1373,6 +1379,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
return 0;
}
+static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
+{
+ return 0;
+}
+
+static inline bool memcg_stat_item_valid(int idx)
+{
+ return false;
+}
+
+static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
+{
+ return false;
+}
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -1436,6 +1457,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return 0;
}
+static inline void mem_cgroup_flush_workqueue(void) { }
+
static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index cc74de3dbcfe..c328a7b356d0 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -17,6 +17,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
* to by vm_flags_ptr.
*/
int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
+struct file *memfd_alloc_file(const char *name, unsigned int flags);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -31,6 +32,11 @@ static inline int memfd_check_seals_mmap(struct file *file,
{
return 0;
}
+
+static inline struct file *memfd_alloc_file(const char *name, unsigned int flags)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif
#endif /* __LINUX_MEMFD_H */
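memfd_alloc_file() exposes memfd creation to in-kernel users, returning a struct file rather than installing an fd. A sketch, assuming the flags argument takes the uapi MFD_* flags:

struct file *file;

file = memfd_alloc_file("my-scratch", MFD_CLOEXEC | MFD_ALLOW_SEALING);
if (IS_ERR(file))
	return PTR_ERR(file);
/* ... use file->f_mapping, then drop the reference ... */
fput(file);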
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 713ec0435b48..e3c2ccf872a8 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -224,7 +224,8 @@ static inline bool is_fsdax_page(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page, unsigned int order);
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+ unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -234,9 +235,11 @@ bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
-static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+static inline void zone_device_folio_init(struct folio *folio,
+ struct dev_pagemap *pgmap,
+ unsigned int order)
{
- zone_device_page_init(&folio->page, order);
+ zone_device_page_init(&folio->page, pgmap, order);
if (order)
folio_set_large_rmappable(folio);
}
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 5f70d3b5d1b1..097ef4dfcdac 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -667,7 +667,7 @@ static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
return -ENODEV;
return request_threaded_irq(irq + wm8350->irq_base, NULL,
- handler, flags, name, data);
+ handler, flags | IRQF_ONESHOT, name, data);
}
static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6f959d8ca4b4..73d3500d388e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -608,7 +608,11 @@ enum {
/*
* Flags which should result in page tables being copied on fork. These are
* flags which indicate that the VMA maps page tables which cannot be
- * reconsistuted upon page fault, so necessitate page table copying upon
+ * reconstituted upon page fault, so necessitate page table copying upon fork.
+ *
+ * Note that these flags should be compared with the DESTINATION VMA not the
+ * source, as VM_UFFD_WP may not be propagated to destination, while all other
+ * flags will be.
*
* VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
* reasonably reconstructed on page fault.
@@ -2975,15 +2979,8 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl);
-static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl)
-{
- pte_t *ptep;
- __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
- return ptep;
-}
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -3337,31 +3334,15 @@ static inline bool pagetable_pte_ctor(struct mm_struct *mm,
return true;
}
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
-static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
- pmd_t *pmdvalp)
-{
- pte_t *pte;
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
- __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
- return pte;
-}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
}
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp);
-static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp)
-{
- pte_t *pte;
-
- __cond_lock(RCU, __cond_lock(*ptlp,
- pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
- return pte;
-}
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 42af2292951d..78950eb8926d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1329,7 +1329,7 @@ struct mm_struct {
* The mm_cpumask needs to be at the end of mm_struct, because it
* is dynamically sized based on nr_cpu_ids.
*/
- unsigned long cpu_bitmap[];
+ char flexible_array[] __aligned(__alignof__(unsigned long));
};
/* Copy value to the first system word of mm flags, non-atomically. */
@@ -1366,19 +1366,24 @@ static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
+#define MM_STRUCT_FLEXIBLE_ARRAY_INIT \
+{ \
+ [0 ... sizeof(cpumask_t) + MM_CID_STATIC_SIZE - 1] = 0 \
+}
+
/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;
- cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpu_bitmap += offsetof(struct mm_struct, flexible_array);
cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return (struct cpumask *)&mm->cpu_bitmap;
+ return (struct cpumask *)&mm->flexible_array;
}
#ifdef CONFIG_LRU_GEN
@@ -1469,7 +1474,7 @@ static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
{
unsigned long bitmap = (unsigned long)mm;
- bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ bitmap += offsetof(struct mm_struct, flexible_array);
/* Skip cpu_bitmap */
bitmap += cpumask_size();
return (struct cpumask *)bitmap;
@@ -1495,7 +1500,7 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
mm_init_cid(mm, p);
return 0;
}
-#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
+# define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
@@ -1509,6 +1514,8 @@ static inline unsigned int mm_cid_size(void)
return cpumask_size() + bitmap_size(num_possible_cpus());
}
+/* Use 2 * NR_CPUS as the worst case for static allocation. */
+# define MM_CID_STATIC_SIZE (2 * sizeof(cpumask_t))
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
@@ -1517,11 +1524,13 @@ static inline unsigned int mm_cid_size(void)
{
return 0;
}
+# define MM_CID_STATIC_SIZE 0
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
struct vm_fault;
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index ac01dc4eb2ce..ed3dd0f3fe19 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -24,7 +24,7 @@ static inline void leave_mm(void) { }
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
-# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_DOMAIN)
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..fc5d6c88d2f0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1648,14 +1648,15 @@ static inline int is_highmem(const struct zone *zone)
return is_highmem_idx(zone_idx(zone));
}
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
+bool has_managed_zone(enum zone_type zone);
static inline bool has_managed_dma(void)
{
+#ifdef CONFIG_ZONE_DMA
+ return has_managed_zone(ZONE_DMA);
+#else
return false;
-}
#endif
+}
#ifndef CONFIG_NUMA
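has_managed_dma() becomes a wrapper around the new has_managed_zone(), which makes the same query available for other zones. A hypothetical DMA32 counterpart would follow the same pattern:

static inline bool has_managed_dma32(void)
{
#ifdef CONFIG_ZONE_DMA32
	return has_managed_zone(ZONE_DMA32);
#else
	return false;
#endif
}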
diff --git a/include/linux/module.h b/include/linux/module.h
index d80c3ea57472..20ddfd97630d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -151,16 +151,10 @@ extern void cleanup_module(void);
#define __init_or_module
#define __initdata_or_module
#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
#else
#define __init_or_module __init
#define __initdata_or_module __initdata
#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
struct module_kobject *lookup_or_create_module_kobject(const char *name);
@@ -770,8 +764,6 @@ static inline bool is_livepatch_module(struct module *mod)
#endif
}
-void set_module_sig_enforced(void);
-
void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data);
#else /* !CONFIG_MODULES... */
@@ -866,10 +858,6 @@ static inline bool module_requested_async_probing(struct module *module)
}
-static inline void set_module_sig_enforced(void)
-{
-}
-
/* Dereference module function descriptor */
static inline
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
@@ -925,6 +913,8 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#ifdef CONFIG_MODULE_SIG
bool is_module_sig_enforced(void);
+void set_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
@@ -935,6 +925,10 @@ static inline bool is_module_sig_enforced(void)
return false;
}
+static inline void set_module_sig_enforced(void)
+{
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 915f32f7d888..c03db3c2fd40 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -355,8 +355,8 @@ static inline void kernel_param_unlock(struct module *mod)
/**
 * __core_param_cb - similar to core_param, but takes set/get ops instead of a type.
* @name: the name of the cmdline and sysfs parameter (often the same as var)
- * @var: the variable
* @ops: the set & get operations for this parameter.
+ * @arg: the variable
* @perm: visibility in sysfs
*
* Ideally this should be called 'core_param_cb', but the name has been
@@ -390,7 +390,7 @@ static inline void kernel_param_unlock(struct module *mod)
* @name1: parameter name 1
* @name2: parameter name 2
*
- * Returns true if the two parameter names are equal.
+ * Returns: true if the two parameter names are equal.
* Dashes (-) are considered equal to underscores (_).
*/
extern bool parameq(const char *name1, const char *name2);
@@ -402,6 +402,10 @@ extern bool parameq(const char *name1, const char *name2);
* @n: the length to compare
*
* Similar to parameq(), except it compares @n characters.
+ *
+ * Returns: true if the first @n characters of the two parameter names
+ * are equal.
+ * Dashes (-) are considered equal to underscores (_).
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
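As the kerneldoc above notes, dashes and underscores compare equal in parameter names. A short hedged illustration (the parameter name below is made up):

#include <linux/moduleparam.h>

/* "debug-level" on the command line selects the same parameter. */
static bool demo_is_debug_param(const char *name)
{
	return parameq(name, "debug_level");
}

/* Compare only the first five characters ("debug"), same dash/underscore rule. */
static bool demo_has_debug_prefix(const char *name)
{
	return parameqn(name, "debug_level", 5);
}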
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8003e3218c46..fa41eed62868 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -49,12 +49,12 @@ typedef struct arch_msi_msg_data {
#endif
/**
- * msi_msg - Representation of a MSI message
+ * struct msi_msg - Representation of a MSI message
* @address_lo: Low 32 bits of msi message address
- * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @arch_addr_lo: Architecture specific shadow of @address_lo
* @address_hi: High 32 bits of msi message address
* (only used when device supports it)
- * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @arch_addr_hi: Architecture specific shadow of @address_hi
* @data: MSI message data (usually 16 bits)
* @arch_data: Architecture specific shadow of @data
*/
@@ -91,7 +91,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
*
* @msi_mask: [PCI MSI] MSI cached mask bits
* @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
@@ -101,6 +101,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
* @can_mask: [PCI MSI/X] Masking supported?
* @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
* @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @msi_attrib: [PCI MSI/X] Compound struct of MSI/X attributes
* @mask_pos: [PCI MSI] Mask register position
* @mask_base: [PCI MSI-X] Mask register base address
*/
@@ -169,7 +170,7 @@ struct msi_desc_data {
* Only used if iommu_msi_shift != 0
* @iommu_msi_shift: Indicates how many bits of the original address should be
* preserved when using iommu_msi_iova.
- * @sysfs_attr: Pointer to sysfs device attribute
+ * @sysfs_attrs: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
@@ -220,7 +221,7 @@ enum msi_desc_filter {
/**
* struct msi_dev_domain - The internals of MSI domain info per device
* @store: Xarray for storing MSI descriptor pointers
- * @irqdomain: Pointer to a per device interrupt domain
+ * @domain: Pointer to a per device interrupt domain
*/
struct msi_dev_domain {
struct xarray store;
@@ -702,7 +703,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
-u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
+u32 pci_msi_map_rid_ctlr_node(struct irq_domain *domain, struct pci_dev *pdev,
+ struct fwnode_handle **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 56047a4e54c9..255972f3d88d 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -2,7 +2,7 @@
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
- * Thomas Gleixner <tglx@linutronix.de>
+ * Thomas Gleixner <tglx@kernel.org>
*
* Contains all JEDEC related definitions
*/
diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h
index c6c71894c575..2aa2f8ef68d2 100644
--- a/include/linux/mtd/nand-ecc-sw-hamming.h
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
* David Woodhouse <dwmw2@infradead.org>
- * Thomas Gleixner <tglx@linutronix.de>
+ * Thomas Gleixner <tglx@kernel.org>
*
* This file is the header for the NAND Hamming ECC implementation.
*/
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
index 98f075b86931..622891191e9c 100644
--- a/include/linux/mtd/ndfc.h
+++ b/include/linux/mtd/ndfc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (c) 2006 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
*
* Info:
* Contains defines, datastructures for ndfc nand controller
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 55ab2e4d62f9..09a5cbd8f232 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -2,7 +2,7 @@
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
- * Thomas Gleixner <tglx@linutronix.de>
+ * Thomas Gleixner <tglx@kernel.org>
*
* Contains all ONFI related definitions
*/
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
index bc11eb6b593b..2df6fba699f2 100644
--- a/include/linux/mtd/platnand.h
+++ b/include/linux/mtd/platnand.h
@@ -2,7 +2,7 @@
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
- * Thomas Gleixner <tglx@linutronix.de>
+ * Thomas Gleixner <tglx@kernel.org>
*
* Contains all platform NAND related definitions.
*/
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index d30bdc3fcfd7..5c70e7bd3ed5 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -2,7 +2,7 @@
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
- * Thomas Gleixner <tglx@linutronix.de>
+ * Thomas Gleixner <tglx@kernel.org>
*
* Info:
* Contains standard defines and IDs for NAND flash devices
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bf535f0118bb..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -182,13 +182,13 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
- unsigned int subclass, struct lockdep_map *nest_lock);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -211,10 +211,10 @@ do { \
_mutex_lock_killable(lock, subclass, NULL)
#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock) __acquires(lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock);
+extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -232,7 +232,7 @@ extern void mutex_lock_io(struct mutex *lock);
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) \
( \
@@ -242,17 +242,27 @@ extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest
#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
#else
-extern int mutex_trylock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
-extern void mutex_unlock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock) __releases(lock);
-extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock);
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
extern unsigned long mutex_get_owner(struct mutex *lock);
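The DEFINE_LOCK_GUARD_1() forms keep existing guard()/scoped_guard() call sites working while letting the context analysis see the acquire/release pairing. A hedged usage sketch (the counter struct is hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct demo_counter {
	struct mutex	lock;
	u64		value;
};

static u64 demo_counter_read(struct demo_counter *c)
{
	guard(mutex)(&c->lock);		/* released automatically on return */
	return c->value;
}

static bool demo_counter_try_bump(struct demo_counter *c)
{
	scoped_guard(mutex_try, &c->lock) {
		c->value++;
		return true;
	}
	return false;			/* trylock failed, nothing held */
}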
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..80975935ec48 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -38,7 +38,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+context_lock_struct(mutex) {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +59,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>
-struct mutex {
+context_lock_struct(mutex) {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5870a9e514a5..d99b0fbc1942 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -5323,7 +5323,8 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
netdev_features_t mask)
{
- return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+ return netdev_increment_features(features, NETIF_F_ALL_TSO |
+ NETIF_F_ALL_FOR_ALL, mask);
}
int __netdev_update_features(struct net_device *dev);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a6624edb7226..8dd79a3f3d66 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -637,6 +637,7 @@ extern int nfs_update_folio(struct file *file, struct folio *folio,
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+extern int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index cf3c6ab408aa..207156f2143c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -83,6 +83,7 @@ static inline void reset_hung_task_detector(void) { }
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
+extern unsigned long hardlockup_si_mask;
#else
static inline void hardlockup_detector_disable(void) {}
#endif
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
index b332b019b29c..0014fbc1c626 100644
--- a/include/linux/ns/ns_common_types.h
+++ b/include/linux/ns/ns_common_types.h
@@ -108,11 +108,13 @@ extern const struct proc_ns_operations utsns_operations;
* @ns_tree: namespace tree nodes and active reference count
*/
struct ns_common {
+ struct {
+ refcount_t __ns_ref; /* do not use directly */
+ } ____cacheline_aligned_in_smp;
u32 ns_type;
struct dentry *stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
- refcount_t __ns_ref; /* do not use directly */
union {
struct ns_tree;
struct rcu_head ns_rcu;
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 4d103ac8f5c7..b8710c825d64 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -162,8 +162,7 @@ void nubus_seq_write_rsrc_mem(struct seq_file *m,
unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
/* Declarations relating to driver model objects */
-int nubus_parent_device_register(void);
-int nubus_device_register(struct nubus_board *board);
+int nubus_device_register(struct device *parent, struct nubus_board *board);
int nubus_driver_register(struct nubus_driver *ndrv);
void nubus_driver_unregister(struct nubus_driver *ndrv);
int nubus_proc_show(struct seq_file *m, void *data);
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1c2bc0281807..2a64d8cecaae 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -11,6 +11,30 @@
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+struct of_imap_parser {
+ struct device_node *node;
+ const __be32 *imap;
+ const __be32 *imap_end;
+ u32 parent_offset;
+};
+
+struct of_imap_item {
+ struct of_phandle_args parent_args;
+ u32 child_imap_count;
+	u32 child_imap[16]; /* Arbitrary size.
+			     * Should be #address-cells + #interrupt-cells, but
+			     * to avoid allocation we assume that 16 entries
+			     * are enough.
+			     */
+};
+
+/*
+ * If the iterator is exited prematurely (break, goto, return), of_node_put()
+ * has to be called on item.parent_args.np.
+ */
+#define for_each_of_imap_item(parser, item) \
+ for (; of_imap_parser_one(parser, item);)
+
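A hedged sketch of walking the interrupt map with the new iterator; it assumes of_imap_parser_one() fills item.parent_args with a counted reference on the parent node, per the comment above. The lookup helper itself is hypothetical.

#include <linux/of.h>
#include <linux/of_irq.h>

/* Return the first interrupt-map parent matching @compat; the reference is
 * handed to the caller, who must drop it with of_node_put().
 */
static struct device_node *demo_find_imap_parent(struct device_node *np,
						 const char *compat)
{
	struct of_imap_parser parser;
	struct of_imap_item item;

	if (of_imap_parser_init(&parser, np, &item))
		return NULL;

	for_each_of_imap_item(&parser, &item) {
		if (of_device_is_compatible(item.parent_args.np, compat))
			return item.parent_args.np;	/* early exit, keep ref */
	}
	return NULL;
}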
/*
* Workarounds only applied to 32bit powermac machines
*/
@@ -49,6 +73,11 @@ extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item);
+extern struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
const struct device_node *np,
enum irq_domain_bus_token token);
@@ -92,7 +121,17 @@ static inline void *of_irq_find_parent(struct device_node *child)
{
return NULL;
}
-
+static inline int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item)
+{
+ return -ENOSYS;
+}
+static inline struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item)
+{
+ return NULL;
+}
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 6de479ebbe5d..ebce402854de 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -145,6 +145,11 @@ enum OID {
OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+ /* NIST FIPS-204 ML-DSA */
+ OID_id_ml_dsa_44, /* 2.16.840.1.101.3.4.3.17 */
+ OID_id_ml_dsa_65, /* 2.16.840.1.101.3.4.3.18 */
+ OID_id_ml_dsa_87, /* 2.16.840.1.101.3.4.3.19 */
+
OID__NR
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 736f633b2d5f..6220a2000df8 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -552,4 +552,46 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
(__member_size((name)->array) / sizeof(*(name)->array) + \
__must_be_array((name)->array))
+/**
+ * typeof_flex_counter() - Return the type of the counter variable of a given
+ * flexible array member annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ *
+ * Returns: the type of the __counted_by() counter, or "size_t" if no annotation exists.
+ */
+#define typeof_flex_counter(FAM) \
+ typeof(_Generic(__flex_counter(FAM), \
+ void *: (size_t)0, \
+ default: *__flex_counter(FAM)))
+
+/**
+ * overflows_flex_counter_type() - Check if the counter associated with the
+ * given flexible array member can represent
+ * a value.
+ * @TYPE: Type of the struct that contains the @FAM.
+ * @FAM: Member name of the FAM within @TYPE.
+ * @COUNT: Value to check against the __counted_by annotated @FAM's counter.
+ *
+ * Returns: true if @COUNT can be represented in the @FAM's counter. When
+ * @FAM is not annotated with __counted_by(), always returns true.
+ */
+#define overflows_flex_counter_type(TYPE, FAM, COUNT) \
+ (!overflows_type(COUNT, typeof_flex_counter(((TYPE *)NULL)->FAM)))
+
+/**
+ * __set_flex_counter() - Set the counter associated with the given flexible
+ *			   array member that has been annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ * @COUNT: Value to store to the __counted_by annotated @FAM's counter.
+ *
+ * This is a no-op if no annotation exists. @COUNT needs to be checked with
+ * overflows_flex_counter_type() before using this function.
+ */
+#define __set_flex_counter(FAM, COUNT) \
+({ \
+ *_Generic(__flex_counter(FAM), \
+ void *: &(size_t){ 0 }, \
+ default: __flex_counter(FAM)) = (COUNT); \
+})
+
#endif /* __LINUX_OVERFLOW_H */
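A hedged sketch tying these helpers to a __counted_by() flexible array allocation; the struct and allocator are hypothetical, and the check follows the polarity documented above (true means @COUNT fits in the counter).

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_frame {
	u16	len;
	u8	data[] __counted_by(len);
};

static struct demo_frame *demo_frame_alloc(size_t count, gfp_t gfp)
{
	struct demo_frame *f;

	/* Reject counts that the u16 counter cannot represent. */
	if (!overflows_flex_counter_type(struct demo_frame, data, count))
		return NULL;

	f = kzalloc(struct_size(f, data, count), gfp);
	if (!f)
		return NULL;

	__set_flex_counter(f->data, count);	/* no-op without __counted_by() */
	return f;
}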
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 31a848485ad9..ec442af3f886 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -210,6 +210,7 @@ enum mapping_flags {
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
account usage to user cgroups */
+ AS_NO_DATA_INTEGRITY = 11, /* no data integrity guarantees */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
@@ -345,6 +346,16 @@ static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct addres
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
+static inline void mapping_set_no_data_integrity(struct address_space *mapping)
+{
+ set_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
+static inline bool mapping_no_data_integrity(const struct address_space *mapping)
+{
+ return test_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
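A minimal hedged sketch of a filesystem opting a mapping out of integrity guarantees and a writeback path testing the flag; both call sites are illustrative.

#include <linux/fs.h>
#include <linux/pagemap.h>

static void demo_mark_scratch_mapping(struct inode *inode)
{
	mapping_set_no_data_integrity(inode->i_mapping);
}

static bool demo_can_skip_integrity_writeback(struct address_space *mapping)
{
	return mapping_no_data_integrity(mapping);
}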
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 078225b514d4..c0c54baadf04 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -12,7 +12,8 @@
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
-extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_root_pm_notifier(struct acpi_device *dev,
+ struct acpi_pci_root *pci_root);
static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
return acpi_remove_pm_notifier(dev);
diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h
index 37a1ad9501b0..ae07d9f699c0 100644
--- a/include/linux/pci-ide.h
+++ b/include/linux/pci-ide.h
@@ -26,7 +26,7 @@ enum pci_ide_partner_select {
/**
* struct pci_ide_partner - Per port pair Selective IDE Stream settings
* @rid_start: Partner Port Requester ID range start
- * @rid_end: Partner Port Requester ID range end
+ * @rid_end: Partner Port Requester ID range end (inclusive)
* @stream_index: Selective IDE Stream Register Block selection
* @mem_assoc: PCI bus memory address association for targeting peer partner
* @pref_assoc: PCI bus prefetchable memory address association for
@@ -82,7 +82,6 @@ struct pci_ide_regs {
* @host_bridge_stream: allocated from host bridge @ide_stream_ida pool
* @stream_id: unique Stream ID (within Partner Port pairing)
* @name: name of the established Selective IDE Stream in sysfs
- * @tsm_dev: For TSM established IDE, the TSM device context
*
* Negative @stream_id values indicate "uninitialized" on the
* expectation that with TSM established IDE the TSM owns the stream_id
@@ -94,7 +93,6 @@ struct pci_ide {
u8 host_bridge_stream;
int stream_id;
const char *name;
- struct tsm_dev *tsm_dev;
};
/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 864775651c6f..b30631673b5b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -377,6 +377,13 @@ struct pci_dev {
0xffffffff. You only need to change
this if your device has broken DMA
or supports 64-bit transfers. */
+ u64 msi_addr_mask; /* Mask of the bits of bus address for
+ MSI that this device implements.
+ Normally set based on device
+ capabilities. You only need to
+ change this if your device claims
+ to support 64-bit MSI but implements
+ fewer than 64 address bits. */
struct device_dma_parameters dma_parms;
@@ -441,7 +448,6 @@ struct pci_dev {
unsigned int is_busmaster:1; /* Is busmaster */
unsigned int no_msi:1; /* May not use MSI */
- unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* Config space access blocked */
unsigned int broken_parity_status:1; /* Generates false positive parity */
unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
@@ -1206,6 +1212,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
+void pci_probe_flush_workqueue(void);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
@@ -2079,6 +2086,8 @@ static inline int pci_has_flag(int flag) { return 0; }
_PCI_NOP_ALL(read, *)
_PCI_NOP_ALL(write,)
+static inline void pci_probe_flush_workqueue(void) { }
+
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
@@ -2210,6 +2219,10 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
{
return -ENOSPC;
}
+
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+}
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 288f5235649a..c8cb010d655e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -161,6 +161,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
+#define percpu_rwsem_is_write_held(sem) lockdep_is_held_type(sem, 0)
#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9870d768db4c..48d851fbd8ea 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1,7 +1,7 @@
/*
* Performance events:
*
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
@@ -305,6 +305,7 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+#define PERF_PMU_CAP_MEDIATED_VPMU 0x0800
/**
* pmu::scope
@@ -998,6 +999,11 @@ struct perf_event_groups {
u64 index;
};
+struct perf_time_ctx {
+ u64 time;
+ u64 stamp;
+ u64 offset;
+};
/**
* struct perf_event_context - event context structure
@@ -1036,9 +1042,12 @@ struct perf_event_context {
/*
* Context clock, runs when context enabled.
*/
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+
+ /*
+ * Context clock, runs when in the guest mode.
+ */
+ struct perf_time_ctx timeguest;
/*
* These fields let us detect when two contexts have both
@@ -1171,9 +1180,8 @@ struct bpf_perf_event_data_kern {
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+ struct perf_time_ctx timeguest;
int active;
};
@@ -1669,6 +1677,8 @@ struct perf_guest_info_callbacks {
unsigned int (*state)(void);
unsigned long (*get_ip)(void);
unsigned int (*handle_intel_pt_intr)(void);
+
+ void (*handle_mediated_pmi)(void);
};
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -1678,6 +1688,7 @@ extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+DECLARE_STATIC_CALL(__perf_guest_handle_mediated_pmi, *perf_guest_cbs->handle_mediated_pmi);
static inline unsigned int perf_guest_state(void)
{
@@ -1694,6 +1705,11 @@ static inline unsigned int perf_guest_handle_intel_pt_intr(void)
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+static inline void perf_guest_handle_mediated_pmi(void)
+{
+ static_call(__perf_guest_handle_mediated_pmi)();
+}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
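A hedged sketch of a hypervisor-side module wiring up the new mediated-PMI hook alongside the existing guest callbacks; all names below are hypothetical.

#include <linux/init.h>
#include <linux/perf_event.h>

static unsigned int demo_guest_state(void)		{ return 0; }
static unsigned long demo_guest_get_ip(void)		{ return 0; }
static void demo_guest_handle_mediated_pmi(void)	{ /* inject the PMI into the guest */ }

static struct perf_guest_info_callbacks demo_guest_cbs = {
	.state			= demo_guest_state,
	.get_ip			= demo_guest_get_ip,
	.handle_mediated_pmi	= demo_guest_handle_mediated_pmi,
};

static int __init demo_guest_init(void)
{
	perf_register_guest_info_callbacks(&demo_guest_cbs);
	return 0;
}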
@@ -1914,6 +1930,13 @@ extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+int perf_create_mediated_pmu(void);
+void perf_release_mediated_pmu(void);
+void perf_load_guest_context(void);
+void perf_put_guest_context(void);
+#endif
+
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
deleted file mode 100644
index 7d21e0c41037..000000000000
--- a/include/linux/platform_data/hwmon-s3c.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C - HWMon interface for ADC
-*/
-
-#ifndef __HWMON_S3C_H__
-#define __HWMON_S3C_H__
-
-/**
- * s3c_hwmon_chcfg - channel configuration
- * @name: The name to give this channel.
- * @mult: Multiply the ADC value read by this.
- * @div: Divide the value from the ADC by this.
- *
- * The value read from the ADC is converted to a value that
- * hwmon expects (mV) by result = (value_read * @mult) / @div.
- */
-struct s3c_hwmon_chcfg {
- const char *name;
- unsigned int mult;
- unsigned int div;
-};
-
-/**
- * s3c_hwmon_pdata - HWMON platform data
- * @in: One configuration for each possible channel used.
- */
-struct s3c_hwmon_pdata {
- struct s3c_hwmon_chcfg *in[8];
-};
-
-#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/mipi-i3c-hci.h b/include/linux/platform_data/mipi-i3c-hci.h
new file mode 100644
index 000000000000..ab7395f455f9
--- /dev/null
+++ b/include/linux/platform_data/mipi-i3c-hci.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+#define INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+
+#include <linux/compiler_types.h>
+
+/**
+ * struct mipi_i3c_hci_platform_data - Platform-dependent data for mipi_i3c_hci
+ * @base_regs: Register set base address (to support multi-bus instances)
+ */
+struct mipi_i3c_hci_platform_data {
+ void __iomem *base_regs;
+};
+
+#endif
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8c1c8adf7fe9..16cf4355b5c1 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -8,7 +8,7 @@
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <dwalker@mvista.com>
*
- * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ * (C) 2005 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
*
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 98a899858ece..afcaaa37a812 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -681,10 +681,10 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+ bool work_in_progress; /* Owned by the PM core */
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- bool work_in_progress:1; /* Owned by the PM core */
bool smart_suspend:1; /* Owned by the PM core */
bool must_resume:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index e86f3b731da2..9e1892525eac 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -44,8 +44,9 @@ posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
}
#endif
-int posix_acl_to_xattr(struct user_namespace *user_ns,
- const struct posix_acl *acl, void *buffer, size_t size);
+extern void *posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
+ size_t *sizep, gfp_t gfp);
+
static inline const char *posix_acl_xattr_name(int type)
{
switch (type) {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c5b30054cd01..7729fef249e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -31,6 +31,16 @@
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>
+token_context_lock(RCU, __reentrant_ctx_lock);
+token_context_lock_instance(RCU, RCU_SCHED);
+token_context_lock_instance(RCU, RCU_BH);
+
+/*
+ * A convenience macro that can be used for RCU-protected globals or struct
+ * members; adds type qualifier __rcu, and also enforces __guarded_by(RCU).
+ */
+#define __rcu_guarded __rcu __guarded_by(RCU)
+
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
@@ -175,36 +185,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf);
# define synchronize_rcu_tasks synchronize_rcu
# endif
-# ifdef CONFIG_TASKS_TRACE_RCU
-// Bits for ->trc_reader_special.b.need_qs field.
-#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
-#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
-
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
-void rcu_tasks_trace_qs_blkd(struct task_struct *t);
-
-# define rcu_tasks_trace_qs(t) \
- do { \
- int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
- \
- if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
- likely(!___rttq_nesting)) { \
- rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
- } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
- !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
- rcu_tasks_trace_qs_blkd(t); \
- } \
- } while (0)
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
-# else
-# define rcu_tasks_trace_qs(t) do { } while (0)
-# endif
-
-#define rcu_tasks_qs(t, preempt) \
-do { \
- rcu_tasks_classic_qs((t), (preempt)); \
- rcu_tasks_trace_qs(t); \
-} while (0)
+#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt))
# ifdef CONFIG_TASKS_RUDE_RCU
void synchronize_rcu_tasks_rude(void);
@@ -425,7 +406,8 @@ static inline void rcu_preempt_sleep_check(void) { }
// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
-static inline bool lockdep_assert_rcu_helper(bool c)
+static __always_inline bool lockdep_assert_rcu_helper(bool c, const struct __ctx_lock_RCU *ctx)
+ __assumes_shared_ctx_lock(RCU) __assumes_shared_ctx_lock(ctx)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
@@ -438,7 +420,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map), RCU))
/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
@@ -448,7 +430,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map), RCU_BH))
/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
@@ -458,7 +440,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map), RCU_SCHED))
/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
@@ -476,17 +458,17 @@ static inline bool lockdep_assert_rcu_helper(bool c)
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
- preemptible()))
+ preemptible(), RCU))
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
-#define lockdep_assert_in_rcu_reader() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() __assume_shared_ctx_lock(RCU)
+#define lockdep_assert_in_rcu_read_lock_bh() __assume_shared_ctx_lock(RCU_BH)
+#define lockdep_assert_in_rcu_read_lock_sched() __assume_shared_ctx_lock(RCU_SCHED)
+#define lockdep_assert_in_rcu_reader() __assume_shared_ctx_lock(RCU)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -506,11 +488,11 @@ static inline bool lockdep_assert_rcu_helper(bool c)
#endif /* #else #ifdef __CHECKER__ */
#define __unrcu_pointer(p, local) \
-({ \
+context_unsafe( \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
- ((typeof(*p) __force __kernel *)(local)); \
-})
+ ((typeof(*p) __force __kernel *)(local)) \
+)
/**
* unrcu_pointer - mark a pointer as not being RCU protected
* @p: pointer needing to lose its __rcu property
@@ -586,7 +568,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
-do { \
+context_unsafe( \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
@@ -594,7 +576,7 @@ do { \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-} while (0)
+)
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
@@ -861,9 +843,10 @@ do { \
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
+ __acquires_shared(RCU)
{
__rcu_read_lock();
- __acquire(RCU);
+ __acquire_shared(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
@@ -891,11 +874,12 @@ static __always_inline void rcu_read_lock(void)
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
+ __releases_shared(RCU)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
- __release(RCU);
+ __release_shared(RCU);
__rcu_read_unlock();
}
@@ -914,9 +898,11 @@ static inline void rcu_read_unlock(void)
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_BH)
{
local_bh_disable();
- __acquire(RCU_BH);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
@@ -928,11 +914,13 @@ static inline void rcu_read_lock_bh(void)
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
+ __releases_shared(RCU) __releases_shared(RCU_BH)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
+ __release_shared(RCU_BH);
+ __release_shared(RCU);
local_bh_enable();
}
@@ -952,9 +940,11 @@ static inline void rcu_read_unlock_bh(void)
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
@@ -962,9 +952,11 @@ static inline void rcu_read_lock_sched(void)
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable_notrace();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
}
/**
@@ -973,22 +965,27 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable_notrace();
}
static __always_inline void rcu_read_lock_dont_migrate(void)
+ __acquires_shared(RCU)
{
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_disable();
@@ -996,6 +993,7 @@ static __always_inline void rcu_read_lock_dont_migrate(void)
}
static inline void rcu_read_unlock_migrate(void)
+ __releases_shared(RCU)
{
rcu_read_unlock();
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
@@ -1041,10 +1039,10 @@ static inline void rcu_read_unlock_migrate(void)
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
- do { \
+ context_unsafe( \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
- } while (0)
+ )
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
@@ -1192,18 +1190,7 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu,
- do {
- rcu_read_lock();
- /*
- * sparse doesn't call the cleanup function,
- * so just release immediately and don't track
- * the context. We don't need to anyway, since
- * the whole point of the guard is to not need
- * the explicit unlock.
- */
- __release(RCU);
- } while (0),
- rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
#endif /* __LINUX_RCUPDATE_H */
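A hedged sketch of the new __rcu_guarded annotation on a global, read under rcu_read_lock() and published with rcu_assign_pointer(); the config struct is hypothetical.

#include <linux/rcupdate.h>

struct demo_cfg {
	int level;
};

/* __rcu_guarded adds __rcu plus __guarded_by(RCU) for the context analysis. */
static struct demo_cfg __rcu_guarded *demo_active_cfg;

static int demo_read_level(void)
{
	struct demo_cfg *c;
	int level = 0;

	rcu_read_lock();
	c = rcu_dereference(demo_active_cfg);
	if (c)
		level = c->level;
	rcu_read_unlock();
	return level;
}

static void demo_publish_cfg(struct demo_cfg *new_cfg)
{
	/* Freeing the old pointer would still need a grace period. */
	rcu_assign_pointer(demo_active_cfg, new_cfg);
}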
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index e6c44eb428ab..cee89e51e45c 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -12,27 +12,74 @@
#include <linux/rcupdate.h>
#include <linux/cleanup.h>
-extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_TASKS_TRACE_RCU
+extern struct srcu_struct rcu_tasks_trace_srcu_struct;
+#endif // #ifdef CONFIG_TASKS_TRACE_RCU
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
- return lock_is_held(&rcu_trace_lock_map);
+ return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
}
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
return 1;
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t);
+/**
+ * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on one
+ * task while other tasks are within RCU read-side critical sections,
+ * invocation of the corresponding RCU callback is deferred until after
+ * all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for
+ * srcu_read_lock_fast(). For a description of how implicit RCU
+ * readers provide the needed ordering for architectures defining the
+ * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
+ * code where RCU is not watching), please see the __srcu_read_lock_fast()
+ * (non-kerneldoc) header comment. Otherwise, the smp_mb() below provided
+ * the needed ordering.
+ */
+static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
+{
+ struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ return ret;
+}
+
+/**
+ * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
+ * @scp: return value from corresponding rcu_read_lock_tasks_trace().
+ *
+ * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
+ * returned the value passed in via scp.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ * For memory-ordering information, please see the header comment for the
+ * rcu_read_lock_tasks_trace() function.
+ */
+static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
+{
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
+}
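A short hedged reader using the cookie-returning pair above; the protected state is omitted.

#include <linux/rcupdate_trace.h>

static void demo_tasks_trace_reader(void)
{
	struct srcu_ctr __percpu *cookie;

	cookie = rcu_read_lock_tasks_trace();
	/* ... access state protected by Tasks Trace RCU ... */
	rcu_read_unlock_tasks_trace(cookie);
}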
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,12 +97,15 @@ static inline void rcu_read_lock_trace(void)
{
struct task_struct *t = current;
- WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
- barrier();
- if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
- t->trc_reader_special.b.need_mb)
- smp_mb(); // Pairs with update-side barriers
- rcu_lock_acquire(&rcu_trace_lock_map);
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (t->trc_reader_nesting++) {
+ // In case we interrupted a Tasks Trace RCU reader.
+ return;
+ }
+ barrier(); // nesting before scp to protect against interrupt handler.
+ t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
}
/**
@@ -69,26 +119,88 @@ static inline void rcu_read_lock_trace(void)
*/
static inline void rcu_read_unlock_trace(void)
{
- int nesting;
+ struct srcu_ctr __percpu *scp;
struct task_struct *t = current;
- rcu_lock_release(&rcu_trace_lock_map);
- nesting = READ_ONCE(t->trc_reader_nesting) - 1;
- barrier(); // Critical section before disabling.
- // Disable IPI-based setting of .need_qs.
- WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
- if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
- WRITE_ONCE(t->trc_reader_nesting, nesting);
- return; // We assume shallow reader nesting.
+ scp = t->trc_reader_scp;
+ barrier(); // scp before nesting to protect against interrupt handler.
+ if (!--t->trc_reader_nesting) {
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
}
- WARN_ON_ONCE(nesting != 0);
- rcu_read_unlock_trace_special(t);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}
-void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
-void synchronize_rcu_tasks_trace(void);
-void rcu_barrier_tasks_trace(void);
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
+/**
+ * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a trace rcu-tasks
+ * grace period elapses, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These
+ * read-side critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
+{
+ call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
+}
+
+/**
+ * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
+ *
+ * Control will return to the caller some time after a trace rcu-tasks
+ * grace period has elapsed, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These read-side
+ * critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function preambles
+ * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
+ * (yet) intended for heavy use from multiple CPUs.
+ *
+ * See the description of synchronize_rcu() for more detailed information
+ * on memory ordering guarantees.
+ */
+static inline void synchronize_rcu_tasks_trace(void)
+{
+ synchronize_srcu(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
+ *
+ * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
+ * for example, if there are no pending callbacks.
+ */
+static inline void rcu_barrier_tasks_trace(void)
+{
+ srcu_barrier(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period
+ *
+ * Cause the current Tasks Trace RCU grace period to become expedited.
+ * The grace period following the current one might also be expedited.
+ * If there is no current grace period, one might be created. If the
+ * current grace period is currently sleeping, that sleep will complete
+ * before expediting takes effect.
+ */
+static inline void rcu_tasks_trace_expedite_current(void)
+{
+ srcu_expedite_current(&rcu_tasks_trace_srcu_struct);
+}
+
+// Placeholders to enable stepwise transition.
+void __init rcu_tasks_trace_suppress_unused(void);
+
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 80dc023ac2bf..3da377ffb0c2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -478,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(true, lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(true, lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags) __cond_acquires(lock);
+ unsigned long *flags) __cond_acquires(true, lock);
#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 54701668b3df..006e57fd7ca5 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -53,6 +53,7 @@ enum resctrl_res_level {
RDT_RESOURCE_L2,
RDT_RESOURCE_MBA,
RDT_RESOURCE_SMBA,
+ RDT_RESOURCE_PERF_PKG,
/* Must be the last */
RDT_NUM_RESOURCES,
@@ -131,15 +132,24 @@ enum resctrl_domain_type {
* @list: all instances of this resource
* @id: unique id for this instance
* @type: type of this instance
+ * @rid: resource id for this instance
* @cpu_mask: which CPUs share this resource
*/
struct rdt_domain_hdr {
struct list_head list;
int id;
enum resctrl_domain_type type;
+ enum resctrl_res_level rid;
struct cpumask cpu_mask;
};
+static inline bool domain_header_is_valid(struct rdt_domain_hdr *hdr,
+ enum resctrl_domain_type type,
+ enum resctrl_res_level rid)
+{
+ return !WARN_ON_ONCE(hdr->type != type || hdr->rid != rid);
+}
+
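A hedged sketch of using the check above to safely resolve a generic header to the L3 monitoring domain; RESCTRL_MON_DOMAIN is assumed to be the existing monitor domain type, and the helper itself is illustrative.

#include <linux/container_of.h>
#include <linux/resctrl.h>

static struct rdt_l3_mon_domain *demo_to_l3_mon_domain(struct rdt_domain_hdr *hdr)
{
	if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
		return NULL;	/* WARNs once and bails on a mismatched header */

	return container_of(hdr, struct rdt_l3_mon_domain, hdr);
}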
/**
* struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
* @hdr: common header for different domain types
@@ -169,7 +179,7 @@ struct mbm_cntr_cfg {
};
/**
- * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * struct rdt_l3_mon_domain - group of CPUs sharing RDT_RESOURCE_L3 monitoring
* @hdr: common header for different domain types
* @ci_id: cache info id for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
@@ -183,7 +193,7 @@ struct mbm_cntr_cfg {
* @cntr_cfg: array of assignable counters' configuration (indexed
* by counter ID)
*/
-struct rdt_mon_domain {
+struct rdt_l3_mon_domain {
struct rdt_domain_hdr hdr;
unsigned int ci_id;
unsigned long *rmid_busy_llc;
@@ -261,6 +271,7 @@ enum resctrl_scope {
RESCTRL_L2_CACHE = 2,
RESCTRL_L3_CACHE = 3,
RESCTRL_L3_NODE,
+ RESCTRL_PACKAGE,
};
/**
@@ -284,7 +295,7 @@ enum resctrl_schema_fmt {
* events of monitor groups created via mkdir.
*/
struct resctrl_mon {
- int num_rmid;
+ u32 num_rmid;
unsigned int mbm_cfg_mask;
int num_mbm_cntrs;
bool mbm_cntr_assignable;
@@ -358,10 +369,10 @@ struct resctrl_cpu_defaults {
};
struct resctrl_mon_config_info {
- struct rdt_resource *r;
- struct rdt_mon_domain *d;
- u32 evtid;
- u32 mon_config;
+ struct rdt_resource *r;
+ struct rdt_l3_mon_domain *d;
+ u32 evtid;
+ u32 mon_config;
};
/**
@@ -403,7 +414,8 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
-void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+bool resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
+ unsigned int binary_bits, void *arch_priv);
bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
@@ -498,22 +510,31 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type);
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
+/*
+ * Architecture hook called at beginning of first file system mount attempt.
+ * No locks are held.
+ */
+void resctrl_arch_pre_mount(void);
+
/**
* resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
* for this resource and domain.
* @r: resource that the counter should be read from.
- * @d: domain that the counter should be read from.
+ * @hdr: Header of domain that the counter should be read from.
* @closid: closid that matches the rmid. Depending on the architecture, the
* counter may match traffic of both @closid and @rmid, or @rmid
* only.
* @rmid: rmid of the counter to read.
* @eventid: eventid to read, e.g. L3 occupancy.
+ * @arch_priv: Architecture private data for this event.
+ * The @arch_priv provided by the architecture via
+ * resctrl_enable_mon_event().
* @val: result of the counter read in bytes.
* @arch_mon_ctx: An architecture specific value from
* resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
@@ -529,9 +550,9 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
- u64 *val, void *arch_mon_ctx);
+ void *arch_priv, u64 *val, void *arch_mon_ctx);
/**
* resctrl_arch_rmid_read_context_check() - warn about invalid contexts
@@ -576,7 +597,7 @@ struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid,
enum resctrl_event_id eventid);
@@ -589,7 +610,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d);
/**
* resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
@@ -615,7 +636,7 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
*
* This can be called from any CPU.
*/
-void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
enum resctrl_event_id evtid, u32 rmid, u32 closid,
u32 cntr_id, bool assign);
@@ -638,7 +659,7 @@ void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid, u64 *val);
@@ -653,7 +674,7 @@ int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid);
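A caller-side sketch of the reworked read path (the wrapper below is illustrative only and skips error handling for the context allocation; the ctx helpers are the ones named in the kernel-doc above):

static int read_one_event(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
			  u32 closid, u32 rmid, enum resctrl_event_id evtid,
			  void *arch_priv, u64 *bytes)
{
	void *ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	int err;

	/*
	 * The domain is now passed by its generic header and @arch_priv is
	 * the cookie the architecture handed over via resctrl_enable_mon_event().
	 */
	err = resctrl_arch_rmid_read(r, hdr, closid, rmid, evtid,
				     arch_priv, bytes, ctx);
	resctrl_arch_mon_ctx_free(r, evtid, ctx);
	return err;
}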
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index acfe07860b34..a5f56faa18d2 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -50,6 +50,17 @@ enum resctrl_event_id {
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+ /* Intel Telemetry Events */
+ PMT_EVENT_ENERGY,
+ PMT_EVENT_ACTIVITY,
+ PMT_EVENT_STALLS_LLC_HIT,
+ PMT_EVENT_C1_RES,
+ PMT_EVENT_UNHALTED_CORE_CYCLES,
+ PMT_EVENT_STALLS_LLC_MISS,
+ PMT_EVENT_AUTO_C6_RES,
+ PMT_EVENT_UNHALTED_REF_CYCLES,
+ PMT_EVENT_UOPS_RETIRED,
+
/* Must be the last */
QOS_NUM_EVENTS,
};
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 67d2bf579942..9b262109726d 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -6,6 +6,7 @@
#define __LINUX_RESTART_BLOCK_H
#include <linux/compiler.h>
+#include <linux/time64.h>
#include <linux/types.h>
struct __kernel_timespec;
@@ -50,8 +51,7 @@ struct restart_block {
struct pollfd __user *ufds;
int nfds;
int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
+ struct timespec64 end_time;
} poll;
};
};
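A consumer-side sketch, not taken from the patch: with the deadline carried as a struct timespec64, the poll() restart data can be filled with one structure assignment instead of splitting it into tv_sec/tv_nsec halves (installing the restart handler itself is omitted here):

static void poll_save_restart(struct restart_block *restart,
			      struct pollfd __user *ufds, int nfds,
			      const struct timespec64 *end_time)
{
	restart->poll.ufds = ufds;
	restart->poll.nfds = nfds;
	restart->poll.has_timeout = end_time != NULL;
	if (end_time)
		restart->poll.end_time = *end_time;	/* single assignment */
}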
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 08e664b21f5a..133ccb39137a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;
@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;
@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -424,13 +428,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}
@@ -612,6 +617,7 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -666,6 +672,7 @@ restart:
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -676,6 +683,7 @@ static __always_inline void *rhashtable_lookup(
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
@@ -727,6 +735,7 @@ static __always_inline void *rhashtable_lookup_fast(
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -737,6 +746,7 @@ static __always_inline struct rhlist_head *rhltable_lookup(
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
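A minimal caller sketch for the new __must_hold_shared(RCU) annotations; the object type, key and params below are assumptions, not part of the patch:

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params my_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct my_obj, key),
	.head_offset = offsetof(struct my_obj, node),
};

static bool my_contains(struct rhashtable *ht, u32 key)
{
	bool found;

	/* The lookup helpers now expect an RCU read-side critical section. */
	rcu_read_lock();
	found = rhashtable_lookup(ht, &key, my_params) != NULL;
	rcu_read_unlock();

	return found;
}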
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index 2266f4dc77b6..7a01a0760405 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -163,4 +163,15 @@ void rseq_syscall(struct pt_regs *regs);
static inline void rseq_syscall(struct pt_regs *regs) { }
#endif /* !CONFIG_DEBUG_RSEQ */
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+void rseq_syscall_enter_work(long syscall);
+int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3);
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline void rseq_syscall_enter_work(long syscall) { }
+static inline int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3)
+{
+ return -ENOTSUPP;
+}
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index a36b472627de..cbc4a791618b 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -15,6 +15,11 @@ struct rseq_stats {
unsigned long cs;
unsigned long clear;
unsigned long fixup;
+ unsigned long s_granted;
+ unsigned long s_expired;
+ unsigned long s_revoked;
+ unsigned long s_yielded;
+ unsigned long s_aborted;
};
DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
@@ -37,6 +42,7 @@ DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
#ifdef CONFIG_RSEQ
#include <linux/jump_label.h>
#include <linux/rseq.h>
+#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/tracepoint-defs.h>
@@ -75,6 +81,147 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
#define rseq_inline __always_inline
#endif
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+DECLARE_STATIC_KEY_TRUE(rseq_slice_extension_key);
+
+static __always_inline bool rseq_slice_extension_enabled(void)
+{
+ return static_branch_likely(&rseq_slice_extension_key);
+}
+
+extern unsigned int rseq_slice_ext_nsecs;
+bool __rseq_arm_slice_extension_timer(void);
+
+static __always_inline bool rseq_arm_slice_extension_timer(void)
+{
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ if (likely(!current->rseq.slice.state.granted))
+ return false;
+
+ return __rseq_arm_slice_extension_timer();
+}
+
+static __always_inline void rseq_slice_clear_grant(struct task_struct *t)
+{
+ if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
+ rseq_stat_inc(rseq_stats.s_revoked);
+ t->rseq.slice.state.granted = false;
+}
+
+static __always_inline bool rseq_grant_slice_extension(bool work_pending)
+{
+ struct task_struct *curr = current;
+ struct rseq_slice_ctrl usr_ctrl;
+ union rseq_slice_state state;
+ struct rseq __user *rseq;
+
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ /* If not enabled or not a return from interrupt, nothing to do. */
+ state = curr->rseq.slice.state;
+ state.enabled &= curr->rseq.event.user_irq;
+ if (likely(!state.state))
+ return false;
+
+ rseq = curr->rseq.usrptr;
+ scoped_user_rw_access(rseq, efault) {
+
+ /*
+ * Quick check conditions where a grant is not possible or
+ * needs to be revoked.
+ *
+ * 1) Any TIF bit which needs to do extra work aside from
+ * rescheduling prevents a grant.
+ *
+ * 2) A previous rescheduling request resulted in a slice
+ * extension grant.
+ */
+ if (unlikely(work_pending || state.granted)) {
+ /* Clear user control unconditionally. No point in checking */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ rseq_slice_clear_grant(curr);
+ return false;
+ }
+
+ unsafe_get_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ if (likely(!(usr_ctrl.request)))
+ return false;
+
+ /* Grant the slice extension */
+ usr_ctrl.request = 0;
+ usr_ctrl.granted = 1;
+ unsafe_put_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ }
+
+ rseq_stat_inc(rseq_stats.s_granted);
+
+ curr->rseq.slice.state.granted = true;
+ /* Store expiry time for arming the timer on the way out */
+ curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
+ /*
+ * This is racy against a remote CPU setting TIF_NEED_RESCHED in
+ * several ways:
+ *
+ * 1)
+ * CPU0 CPU1
+ * clear_tsk()
+ * set_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() -> Folds correctly
+ * 2)
+ * CPU0 CPU1
+ * set_tsk()
+ * clear_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() <- NOOP as TIF_NEED_RESCHED is false
+ *
+ * #1 is no different from a regular remote reschedule as it
+ * sets the previously unset bit and then raises the IPI which
+ * folds it into the preempt counter.
+ *
+ * #2 is obviously incorrect from a scheduler POV, but it is no more
+ * incorrect than the code below clearing the reschedule request
+ * with the safety net of the timer.
+ *
+ * The important part is that the clearing is protected against the
+ * scheduler IPI and also against any other interrupt which might
+ * end up waking up a task and setting the bits in the middle of
+ * the operation:
+ *
+ * clear_tsk()
+ * ---> Interrupt
+ * wakeup_on_this_cpu()
+ * set_tsk()
+ * set_preempt()
+ * clear_preempt()
+ *
+ * which would be inconsistent state.
+ */
+ scoped_guard(irq) {
+ clear_tsk_need_resched(curr);
+ clear_preempt_need_resched();
+ }
+ return true;
+
+efault:
+ force_sig(SIGSEGV);
+ return false;
+}
+
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline bool rseq_slice_extension_enabled(void) { return false; }
+static inline bool rseq_arm_slice_extension_timer(void) { return false; }
+static inline void rseq_slice_clear_grant(struct task_struct *t) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
bool rseq_debug_validate_ids(struct task_struct *t);
@@ -359,8 +506,15 @@ bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
if (csaddr)
unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
}
+ rseq_slice_clear_grant(t);
/* Cache the new values */
t->rseq.ids.cpu_cid = ids->cpu_cid;
rseq_stat_inc(rseq_stats.ids);
@@ -456,8 +610,17 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
*/
u64 csaddr;
- if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
- return false;
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
+ }
+
+ rseq_slice_clear_grant(t);
if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
@@ -473,6 +636,8 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
u32 node_id = cpu_to_node(ids.cpu_id);
return rseq_update_usr(t, regs, &ids, node_id);
+efault:
+ return false;
}
static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
@@ -527,17 +692,19 @@ static __always_inline void clear_tif_rseq(void) { }
static __always_inline bool
rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
{
- if (likely(!test_tif_rseq(ti_work)))
- return false;
-
- if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
- current->rseq.event.slowpath = true;
- set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
- return true;
+ if (unlikely(test_tif_rseq(ti_work))) {
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+ clear_tif_rseq();
}
-
- clear_tif_rseq();
- return false;
+ /*
+ * Arm the slice extension timer if there is nothing left to do and
+ * the task really goes out to user space.
+ */
+ return rseq_arm_slice_extension_timer();
}
#else /* CONFIG_GENERIC_ENTRY */
@@ -611,6 +778,7 @@ static inline void rseq_syscall_exit_to_user_mode(void) { }
static inline void rseq_irqentry_exit_to_user_mode(void) { }
static inline void rseq_exit_to_user_mode_legacy(void) { }
static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
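From user space, the grant logic above implies roughly the flow sketched here; the slice_ctrl field names mirror the kernel-side accesses, but the exact uapi layout and the yield helper are assumptions:

	/* Request an extension before entering a short critical section. */
	rseq_area->slice_ctrl.request = 1;

	/* ... critical section the thread wants to finish undisturbed ... */

	rseq_area->slice_ctrl.request = 0;
	if (rseq_area->slice_ctrl.granted)
		rseq_slice_yield();	/* hypothetical: hand the CPU back promptly */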
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index 332dc14b81c9..da5fa6f40294 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -73,12 +73,39 @@ struct rseq_ids {
};
/**
+ * union rseq_slice_state - Status information for rseq time slice extension
+ * @state: Compound to access the overall state
+ * @enabled: Time slice extension is enabled for the task
+ * @granted: Time slice extension was granted to the task
+ */
+union rseq_slice_state {
+ u16 state;
+ struct {
+ u8 enabled;
+ u8 granted;
+ };
+};
+
+/**
+ * struct rseq_slice - Status information for rseq time slice extension
+ * @state: Time slice extension state
+ * @expires: The time when a grant expires
+ * @yielded: Indicator for rseq_slice_yield()
+ */
+struct rseq_slice {
+ union rseq_slice_state state;
+ u64 expires;
+ u8 yielded;
+};
+
+/**
* struct rseq_data - Storage for all rseq related data
* @usrptr: Pointer to the registered user space RSEQ memory
* @len: Length of the RSEQ region
- * @sig: Signature of critial section abort IPs
+ * @sig: Signature of critical section abort IPs
* @event: Storage for event management
* @ids: Storage for cached CPU ID and MM CID
+ * @slice: Storage for time slice extension data
*/
struct rseq_data {
struct rseq __user *usrptr;
@@ -86,6 +113,9 @@ struct rseq_data {
u32 sig;
struct rseq_event event;
struct rseq_ids ids;
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+ struct rseq_slice slice;
+#endif
};
#else /* CONFIG_RSEQ */
@@ -121,8 +151,7 @@ struct mm_cid_pcpu {
/**
* struct mm_mm_cid - Storage for per MM CID data
* @pcpu: Per CPU storage for CIDs associated to a CPU
- * @percpu: Set, when CIDs are in per CPU mode
- * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
+ * @mode: Indicates per CPU and transition mode
* @max_cids: The exclusive maximum CID value for allocation and convergence
* @irq_work: irq_work to handle the affinity mode change case
* @work: Regular work to handle the affinity mode change case
@@ -139,8 +168,7 @@ struct mm_cid_pcpu {
struct mm_mm_cid {
/* Hotpath read mostly members */
struct mm_cid_pcpu __percpu *pcpu;
- unsigned int percpu;
- unsigned int transit;
+ unsigned int mode;
unsigned int max_cids;
/* Rarely used. Moves @lock and @mutex into the second cacheline */
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index a04dacbdc8ae..a2848f6907e3 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -2,7 +2,7 @@
/*
* Generic Reed Solomon encoder / decoder library
*
- * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ * Copyright (C) 2004 Thomas Gleixner (tglx@kernel.org)
*
* RS code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 5b87c6f4a243..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,16 +29,16 @@ do { \
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
@@ -49,8 +49,8 @@ do { \
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+#define read_trylock(lock) _raw_read_trylock(lock)
+#define write_trylock(lock) _raw_write_trylock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
@@ -112,12 +112,7 @@ do { \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define write_trylock_irqsave(lock, flags) _raw_write_trylock_irqsave(lock, &(flags))
#ifdef arch_rwlock_is_contended
#define rwlock_is_contended(lock) \
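Caller-side usage is unchanged by the rework; a sketch with an assumed lock and counter:

static bool counter_try_bump(rwlock_t *lock, unsigned int *counter)
{
	unsigned long flags;

	if (!write_trylock_irqsave(lock, flags))
		return false;
	(*counter)++;
	write_unlock_irqrestore(lock, flags);
	return true;
}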
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 31d3d1116323..61a852609eab 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -15,24 +15,24 @@
* Released under the General Public License (GPL).
*/
-void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
-void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _raw_read_trylock(rwlock_t *lock);
-int __lockfunc _raw_write_trylock(rwlock_t *lock);
-void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock) __cond_acquires(true, lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
@@ -137,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -145,6 +155,7 @@ static inline int __raw_write_trylock(rwlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline void __raw_read_lock(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -152,6 +163,7 @@ static inline void __raw_read_lock(rwlock_t *lock)
}
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags;
@@ -163,6 +175,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_read_lock_irq(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -171,6 +184,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
}
static inline void __raw_read_lock_bh(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -178,6 +192,7 @@ static inline void __raw_read_lock_bh(rwlock_t *lock)
}
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -189,6 +204,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_write_lock_irq(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -197,6 +213,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
}
static inline void __raw_write_lock_bh(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -204,6 +221,7 @@ static inline void __raw_write_lock_bh(rwlock_t *lock)
}
static inline void __raw_write_lock(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -211,6 +229,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
}
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -220,6 +239,7 @@ static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -227,6 +247,7 @@ static inline void __raw_write_unlock(rwlock_t *lock)
}
static inline void __raw_read_unlock(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -235,6 +256,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -243,6 +265,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
}
static inline void __raw_read_unlock_irq(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -251,6 +274,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
}
static inline void __raw_read_unlock_bh(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -259,6 +283,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -267,6 +292,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
}
static inline void __raw_write_unlock_irq(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -275,6 +301,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
}
static inline void __raw_write_unlock_bh(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 7d81fc6918ee..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,26 +24,29 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
-extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock) __cond_acquires_shared(true, rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases_shared(rwlock);
extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
-extern int rt_write_trylock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock) __cond_acquires(true, rwlock);
extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
static __always_inline void read_lock_bh(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
local_bh_disable();
rt_read_lock(rwlock);
}
static __always_inline void read_lock_irq(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
@@ -55,37 +58,43 @@ static __always_inline void read_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define read_trylock(lock) rt_read_trylock(lock)
static __always_inline void read_unlock(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
local_bh_enable();
}
static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void write_lock(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+ __acquires(rwlock)
{
rt_write_lock_nested(rwlock, subclass);
}
@@ -94,12 +103,14 @@ static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
#endif
static __always_inline void write_lock_bh(rwlock_t *rwlock)
+ __acquires(rwlock)
{
local_bh_disable();
rt_write_lock(rwlock);
}
static __always_inline void write_lock_irq(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
@@ -111,36 +122,38 @@ static __always_inline void write_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+#define write_trylock(lock) rt_write_trylock(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = write_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+ __cond_acquires(true, rwlock)
+{
+ *flags = 0;
+ return rt_write_trylock(rwlock);
+}
+#define write_trylock_irqsave(lock, flags) _write_trylock_irqsave(lock, &(flags))
static __always_inline void write_unlock(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
local_bh_enable();
}
static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 1948442e7750..d5e7316401e7 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -22,7 +22,7 @@
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
-typedef struct {
+context_lock_struct(rwlock) {
arch_rwlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -31,7 +31,8 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
@@ -54,13 +55,14 @@ typedef struct {
#include <linux/rwbase_rt.h>
-typedef struct {
+context_lock_struct(rwlock) {
struct rwbase_rt rwbase;
atomic_t readers;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,7 +45,7 @@
* reduce the chance that they will share the same cacheline causing
* cacheline bouncing problem.
*/
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
atomic_long_t count;
/*
* Write owner or one of the read owners as well flags regarding
@@ -76,11 +76,13 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
@@ -148,7 +150,7 @@ extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#include <linux/rwbase_rt.h>
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -180,11 +182,13 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
@@ -202,6 +206,7 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
*/
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held(sem);
@@ -210,6 +215,7 @@ static inline void rwsem_assert_held(const struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held_write(sem);
@@ -220,48 +226,66 @@ static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
/*
* lock for reading
*/
-extern void down_read(struct rw_semaphore *sem);
-extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
-extern int __must_check down_read_killable(struct rw_semaphore *sem);
+extern void down_read(struct rw_semaphore *sem) __acquires_shared(sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-extern int down_read_trylock(struct rw_semaphore *sem);
+extern int down_read_trylock(struct rw_semaphore *sem) __cond_acquires_shared(true, sem);
/*
* lock for writing
*/
-extern void down_write(struct rw_semaphore *sem);
-extern int __must_check down_write_killable(struct rw_semaphore *sem);
+extern void down_write(struct rw_semaphore *sem) __acquires(sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem) __cond_acquires(0, sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-extern int down_write_trylock(struct rw_semaphore *sem);
+extern int down_write_trylock(struct rw_semaphore *sem) __cond_acquires(true, sem);
/*
* release a read lock
*/
-extern void up_read(struct rw_semaphore *sem);
+extern void up_read(struct rw_semaphore *sem) __releases_shared(sem);
/*
* release a write lock
*/
-extern void up_write(struct rw_semaphore *sem);
-
-DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
-DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
-
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
-DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
+extern void up_write(struct rw_semaphore *sem) __releases(sem);
+
+DEFINE_LOCK_GUARD_1(rwsem_read, struct rw_semaphore, down_read(_T->lock), up_read(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _try, down_read_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _intr, down_read_interruptible(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_try, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_intr, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_intr, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_write, struct rw_semaphore, down_write(_T->lock), up_write(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _try, down_write_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _kill, down_write_killable(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
/*
* downgrade write lock to read lock
*/
-extern void downgrade_write(struct rw_semaphore *sem);
+extern void downgrade_write(struct rw_semaphore *sem) __releases(sem) __acquires_shared(sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -277,11 +301,11 @@ extern void downgrade_write(struct rw_semaphore *sem);
* lockdep_set_class() at lock initialization time.
* See Documentation/locking/lockdep-design.rst for more details.)
*/
-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
-extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+extern void down_read_nested(struct rw_semaphore *sem, int subclass) __acquires_shared(sem);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires_shared(0, sem);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass) __acquires(sem);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires(0, sem);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock) __acquires(sem);
# define down_write_nest_lock(sem, nest_lock) \
do { \
@@ -295,8 +319,8 @@ do { \
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
-extern void down_read_non_owner(struct rw_semaphore *sem);
-extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void down_read_non_owner(struct rw_semaphore *sem) __acquires_shared(sem);
+extern void up_read_non_owner(struct rw_semaphore *sem) __releases_shared(sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
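The guard spelling for callers stays the same after the switch to DEFINE_LOCK_GUARD_1(); a sketch with made-up structure and function names:

struct cache {
	struct rw_semaphore lock;
	unsigned long nr_entries;
};

static unsigned long cache_count(struct cache *c)
{
	/* Takes c->lock for read and releases it when the scope ends. */
	guard(rwsem_read)(&c->lock);
	return c->nr_entries;
}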
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d395f2810fac..36ae08ca0c62 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -586,15 +586,10 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
- union {
- /*
- * When !@on_rq this field is vlag.
- * When cfs_rq->curr == se (which implies @on_rq)
- * this field is vprot. See protect_slice().
- */
- s64 vlag;
- u64 vprot;
- };
+ /* Approximated virtual lag: */
+ s64 vlag;
+ /* 'Protected' deadline, to give out minimum quantums: */
+ u64 vprot;
u64 slice;
u64 nr_migrations;
@@ -945,11 +940,7 @@ struct task_struct {
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
- int trc_ipi_to_cpu;
- union rcu_special trc_reader_special;
- struct list_head trc_holdout_list;
- struct list_head trc_blkd_node;
- int trc_blkd_cpu;
+ struct srcu_ctr __percpu *trc_reader_scp;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
@@ -960,7 +951,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
@@ -1190,6 +1180,7 @@ struct task_struct {
#ifdef CONFIG_IO_URING
struct io_uring_task *io_uring;
+ struct io_restriction *io_uring_restrict;
#endif
/* Namespaces: */
@@ -1776,6 +1767,11 @@ static __always_inline bool is_percpu_thread(void)
(current->nr_cpus_allowed == 1);
}
+static __always_inline bool is_user_task(struct task_struct *task)
+{
+ return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER));
+}
+
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
@@ -1874,7 +1870,6 @@ static inline int task_nice(const struct task_struct *p)
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
-extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
@@ -2094,9 +2089,9 @@ static inline int _cond_resched(void)
_cond_resched(); \
})
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
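The __must_hold() annotations on the cond_resched helpers describe the usual calling pattern, sketched here with an assumed list and lock:

static void prune_all(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		list_del_init(head->next);
		/* May drop and retake the lock if a reschedule is due. */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}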
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 5f8fd5b24a2e..e90efaf6d26e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
+#include <linux/static_call_types.h>
#include <linux/sched/signal.h>
/*
@@ -180,4 +181,21 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
extern unsigned long long
task_sched_runtime(struct task_struct *task);
+#ifdef CONFIG_PARAVIRT
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+#endif
+
#endif /* _LINUX_SCHED_CPUTIME_H */
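A consumer-side fragment for the relocated declarations, assuming CONFIG_PARAVIRT with the static-call based steal clock; the surrounding time accounting is omitted:

	u64 steal = 0;

	if (static_key_false(&paravirt_steal_enabled))
		steal = paravirt_steal_clock(smp_processor_id());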
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index d8501f4709b5..dc3975ff1b2e 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,13 +2,21 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
-#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
enum hk_type {
+ /* Inverse of boot-time isolcpus= argument */
+ HK_TYPE_DOMAIN_BOOT,
+ /*
+ * Same as HK_TYPE_DOMAIN_BOOT but additionally excludes
+ * CPUs placed in cpuset isolated partitions. As such it
+ * is always a subset of HK_TYPE_DOMAIN_BOOT.
+ */
HK_TYPE_DOMAIN,
+ /* Inverse of boot-time isolcpus=managed_irq argument */
HK_TYPE_MANAGED_IRQ,
+ /* Inverse of boot-time nohz_full= or isolcpus=nohz arguments */
HK_TYPE_KERNEL_NOISE,
HK_TYPE_MAX,
@@ -31,6 +39,7 @@ extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
extern bool housekeeping_enabled(enum hk_type type);
extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
+extern int housekeeping_update(struct cpumask *isol_mask);
extern void __init housekeeping_init(void);
#else
@@ -58,6 +67,7 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
return true;
}
+static inline int housekeeping_update(struct cpumask *isol_mask) { return 0; }
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
@@ -72,9 +82,7 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
- return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
- cpuset_cpu_is_isolated(cpu);
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
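A small fragment showing how callers consult the reworked mask; since HK_TYPE_DOMAIN now already reflects cpuset isolated partitions, a single lookup suffices:

	/* Pick any online CPU that is not isolated from the scheduler domains. */
	int cpu = cpumask_any_and(housekeeping_cpumask(HK_TYPE_DOMAIN),
				  cpu_online_mask);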
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0e1d73955fa5..95d0040df584 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -325,6 +325,7 @@ static inline void might_alloc(gfp_t gfp_mask)
/**
* memalloc_flags_save - Add a PF_* flag to current->flags, save old value
+ * @flags: Flags to add.
*
* This allows PF_* flags to be conveniently added, irrespective of current
* value, and then the old version restored with memalloc_flags_restore().
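A usage fragment for the documented helper (the flag choice is only an example):

	unsigned int noio_flags = memalloc_flags_save(PF_MEMALLOC_NOIO);

	/* Allocations in this region implicitly behave as GFP_NOIO. */

	memalloc_flags_restore(noio_flags);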
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7d6449982822..a22248aebcf9 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -737,21 +737,13 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
- unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
- unsigned long *flags)
-{
- struct sighand_struct *ret;
-
- ret = __lock_task_sighand(task, flags);
- (void)__cond_lock(&task->sighand->siglock, ret);
- return ret;
-}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
+ unsigned long *flags)
+ __acquires(&task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
+ __releases(&task->sighand->siglock)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
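A caller sketch for the re-annotated helper (the value read under the lock is just an example); the return value must still be checked, since the sighand may already be gone for an exiting task:

static int live_threads(struct task_struct *task)
{
	unsigned long flags;
	int nr = 0;

	if (lock_task_sighand(task, &flags)) {
		nr = task->signal->nr_threads;
		unlock_task_sighand(task, &flags);
	}
	return nr;
}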
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 525aa2a632b2..41ed884cffc9 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -214,15 +214,19 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
+ __acquires(&p->alloc_lock)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
+ __releases(&p->alloc_lock)
{
spin_unlock(&p->alloc_lock);
}
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
#endif /* _LINUX_SCHED_TASK_H */
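Guard usage is unchanged for callers; a fragment as a sketch:

	scoped_guard(task_lock, p) {
		/* p->mm, p->fs, p->files etc. are stable in this scope. */
	}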
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 0f28b4623ad4..765bbc3d54be 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -66,6 +66,7 @@ extern void wake_up_q(struct wake_q_head *head);
/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
static inline
void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock(lock);
@@ -77,6 +78,7 @@ void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irq(lock);
@@ -89,6 +91,7 @@ void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irqrestore(lock, flags);
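A sketch of the intended calling pattern (the object and waiter are assumptions): wakeups are queued under the lock and issued right after the unlock, with preemption held off across both:

	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock(&obj->lock);
	/* ... state change that makes the waiter runnable ... */
	wake_q_add(&wake_q, waiter);
	raw_spin_unlock_wake(&obj->lock, &wake_q);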
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
index 27bd372cbfb1..2407d7693b6b 100644
--- a/include/linux/scmi_imx_protocol.h
+++ b/include/linux/scmi_imx_protocol.h
@@ -59,6 +59,8 @@ struct scmi_imx_misc_proto_ops {
u32 *num, u32 *val);
int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
u32 ctrl_id, u32 evt_id, u32 flags);
+ int (*misc_syslog)(const struct scmi_protocol_handle *ph, u16 *size,
+ void *array);
};
/* See LMM_ATTRIBUTES in imx95.rst */
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 1690706206e8..c022403c599a 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -151,6 +151,4 @@ static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
}
#endif
-extern struct screen_info screen_info;
-
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 221123660e71..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
*/
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
return read_seqcount_begin(&sl->seqcount);
}
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ __releases_shared(sl) __no_context_analysis
{
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
unsigned long flags;
@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock(&sl->lock);
}
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock(&sl->lock);
}
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
}
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_bh(&sl->lock);
}
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
}
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
unsigned long flags;
@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irqrestore(&sl->lock, flags);
}
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
+ __releases_shared(lock) __no_context_analysis
{
return !(seq & 1) && read_seqretry(lock, seq);
}
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags = 0;
@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
@@ -1225,6 +1249,7 @@ struct ss_tmp {
};
static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+ __no_context_analysis
{
if (sst->lock)
spin_unlock(sst->lock);
@@ -1254,6 +1279,7 @@ extern void __scoped_seqlock_bug(void);
static __always_inline void
__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+ __no_context_analysis
{
switch (sst->state) {
case ss_done:
@@ -1296,22 +1322,31 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
}
}
+/*
+ * Context analysis no-op helper to release the seqlock at the end of the
+ * for-scope; the compiler's alias analysis will recognize that the pointer
+ * @s is an alias of the @_seqlock passed to read_seqbegin(_seqlock) below.
+ */
+static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
+ __releases_shared(*((seqlock_t **)s)) __no_context_analysis {}
+
#define __scoped_seqlock_read(_seqlock, _target, _s) \
for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
- { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }, \
+ *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
+ (struct ss_tmp *)_seqlock; \
_s.state != ss_done; \
__scoped_seqlock_next(&_s, _seqlock, _target))
/**
- * scoped_seqlock_read (lock, ss_state) - execute the read side critical
- * section without manual sequence
- * counter handling or calls to other
- * helpers
- * @lock: pointer to seqlock_t protecting the data
- * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
- * the type of critical read section
+ * scoped_seqlock_read() - execute the read-side critical section
+ * without manual sequence counter handling
+ * or calls to other helpers
+ * @_seqlock: pointer to seqlock_t protecting the data
+ * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
+ * indicating the type of critical read section
*
- * Example:
+ * Example::
*
* scoped_seqlock_read (&lock, ss_lock) {
* // read-side critical section
@@ -1323,4 +1358,8 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
#define scoped_seqlock_read(_seqlock, _target) \
__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
#endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..2d5d793ef660 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
-typedef struct {
+context_lock_struct(seqlock) {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;
#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 86737076101d..112e48970338 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4301,6 +4301,18 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+/* Variant of skb_header_pointer() where @offset is user-controlled
+ * and potentially negative.
+ */
+static inline void * __must_check
+skb_header_pointer_careful(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ if (unlikely(offset < 0 && -offset > skb_headroom(skb)))
+ return NULL;
+ return skb_header_pointer(skb, offset, len, buffer);
+}
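+
+/* A minimal usage sketch for skb_header_pointer_careful(); the header type
+ * and the nhoff offset are hypothetical, for illustration only:
+ *
+ *	struct vlan_hdr _vhdr;
+ *	const struct vlan_hdr *vhdr;
+ *
+ *	vhdr = skb_header_pointer_careful(skb, nhoff, sizeof(_vhdr), &_vhdr);
+ *	if (!vhdr)
+ *		return -EINVAL;
+ */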
+
static inline void * __must_check
skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
{
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 49847888c287..829b281d6c9c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -97,6 +97,8 @@ struct sk_psock {
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
spinlock_t ingress_lock;
+ /** @msg_tot_len: Total bytes queued in ingress_msg list. */
+ u32 msg_tot_len;
unsigned long state;
struct list_head link;
spinlock_t link_lock;
@@ -141,6 +143,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags, int *copied_from_self);
bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
@@ -319,6 +323,27 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline u32 sk_psock_get_msg_len_nolock(struct sk_psock *psock)
+{
+ /* Used by ioctl to read msg_tot_len only; lock-free for performance */
+ return READ_ONCE(psock->msg_tot_len);
+}
+
+static inline void sk_psock_msg_len_add_locked(struct sk_psock *psock, int diff)
+{
+ /* Use WRITE_ONCE() to pair with the lockless READ_ONCE() in
+ * sk_psock_get_msg_len_nolock(). ingress_lock must be held to
+ * prevent concurrent updates to msg_tot_len.
+ */
+ WRITE_ONCE(psock->msg_tot_len, psock->msg_tot_len + diff);
+}
+
+static inline void sk_psock_msg_len_add(struct sk_psock *psock, int diff)
+{
+ spin_lock_bh(&psock->ingress_lock);
+ sk_psock_msg_len_add_locked(psock, diff);
+ spin_unlock_bh(&psock->ingress_lock);
+}
+
static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
@@ -327,6 +352,7 @@ static inline bool sk_psock_queue_msg(struct sk_psock *psock,
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
list_add_tail(&msg->list, &psock->ingress_msg);
+ sk_psock_msg_len_add_locked(psock, msg->sg.size);
ret = true;
} else {
sk_msg_free(psock->sk, msg);
@@ -343,18 +369,25 @@ static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
spin_lock_bh(&psock->ingress_lock);
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
- if (msg)
+ if (msg) {
list_del(&msg->list);
+ sk_psock_msg_len_add_locked(psock, -msg->sg.size);
+ }
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
+static inline struct sk_msg *sk_psock_peek_msg_locked(struct sk_psock *psock)
+{
+ return list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+}
+
static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
struct sk_msg *msg;
spin_lock_bh(&psock->ingress_lock);
- msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ msg = sk_psock_peek_msg_locked(psock);
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
@@ -521,6 +554,39 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
+/* for tcp only, sk is locked */
+static inline ssize_t sk_psock_msg_inq(struct sock *sk)
+{
+ struct sk_psock *psock;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ inq = sk_psock_get_msg_len_nolock(psock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
+/* for udp only, sk is not locked */
+static inline ssize_t sk_msg_first_len(struct sock *sk)
+{
+ struct sk_psock *psock;
+ struct sk_msg *msg;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ spin_lock_bh(&psock->ingress_lock);
+ msg = sk_psock_peek_msg_locked(psock);
+ if (msg)
+ inq = msg->sg.size;
+ spin_unlock_bh(&psock->ingress_lock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2482992248dc..7701b38cedec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
@@ -965,6 +966,111 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+/**
+ * __alloc_objs - Allocate objects of a given type using a given kmalloc wrapper
+ * @KMALLOC: which size-based kmalloc wrapper to allocate with.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type to allocate space for.
+ * @COUNT: how many @TYPE objects to allocate.
+ *
+ * Returns: Pointer to the first of @COUNT-many newly allocated @TYPE
+ * objects, or NULL on failure.
+ */
+#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT) \
+({ \
+ const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \
+ (TYPE *)KMALLOC(__obj_size, GFP); \
+})
+
+/**
+ * __alloc_flex - Allocate an object that has a trailing flexible array
+ * @KMALLOC: kmalloc wrapper function to use for allocation.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type of structure to allocate space for.
+ * @FAM: The name of the flexible array member of @TYPE structure.
+ * @COUNT: how many @FAM elements to allocate space for.
+ *
+ * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
+ * @FAM elements, or NULL on failure or if @COUNT cannot be represented
+ * by the member of @TYPE that counts the @FAM elements (annotated via
+ * __counted_by()).
+ */
+#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT) \
+({ \
+ const size_t __count = (COUNT); \
+ const size_t __obj_size = struct_size_t(TYPE, FAM, __count); \
+ TYPE *__obj_ptr; \
+ if (WARN_ON_ONCE(overflows_flex_counter_type(TYPE, FAM, __count))) \
+ __obj_ptr = NULL; \
+ else \
+ __obj_ptr = KMALLOC(__obj_size, GFP); \
+ if (__obj_ptr) \
+ __set_flex_counter(__obj_ptr->FAM, __count); \
+ __obj_ptr; \
+})
+
+/**
+ * kmalloc_obj - Allocate a single instance of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to a @VAR_OR_TYPE on success, or NULL
+ * on failure.
+ */
+#define kmalloc_obj(VAR_OR_TYPE, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), 1)
+
+/**
+ * kmalloc_objs - Allocate an array of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate an array of.
+ * @COUNT: How many elements in the array.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to array of @VAR_OR_TYPE on success,
+ * or NULL on failure.
+ */
+#define kmalloc_objs(VAR_OR_TYPE, COUNT, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), COUNT)
+
+/**
+ * kmalloc_flex - Allocate a single instance of the given flexible structure
+ * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
+ * @FAM: The name of the flexible array member of the structure.
+ * @COUNT: How many flexible array member elements are desired.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
+ * failure. If @FAM has been annotated with __counted_by(), the allocation
+ * will immediately fail if @COUNT is larger than what the type of the
+ * struct's counter variable can represent.
+ */
+#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, GFP) \
+ __alloc_flex(kmalloc, GFP, typeof(VAR_OR_TYPE), FAM, COUNT)
+
+/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kzalloc_obj(P, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), 1)
+#define kzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), COUNT)
+#define kzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kzalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvmalloc_obj(P, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), 1)
+#define kvmalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), COUNT)
+#define kvmalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvmalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvzalloc_obj(P, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), 1)
+#define kvzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), COUNT)
+#define kvzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvzalloc, GFP, typeof(P), FAM, COUNT)
+
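+/*
+ * A minimal usage sketch for the typed allocation helpers above; the
+ * struct and variable names are hypothetical, for illustration only:
+ *
+ *	struct foo {
+ *		unsigned int nr;
+ *		u32 vals[] __counted_by(nr);
+ *	};
+ *
+ *	struct foo *f = kmalloc_flex(*f, vals, count, GFP_KERNEL);
+ *	int *tbl = kzalloc_objs(*tbl, count, GFP_KERNEL);
+ *
+ *	if (!f || !tbl)
+ *		goto err;
+ */
+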
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h
index 4d23cbb7d407..d01ef4a6b3d7 100644
--- a/include/linux/soc/airoha/airoha_offload.h
+++ b/include/linux/soc/airoha/airoha_offload.h
@@ -52,8 +52,8 @@ static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
{
}
-static inline int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev,
- void *type_data)
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
{
return -EOPNOTSUPP;
}
@@ -71,12 +71,12 @@ static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
#define NPU_RX1_DESC_NUM 512
/* CTRL */
-#define NPU_RX_DMA_DESC_LAST_MASK BIT(29)
-#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15)
-#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1)
+#define NPU_RX_DMA_DESC_LAST_MASK BIT(27)
+#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(26, 14)
+#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(13, 1)
#define NPU_RX_DMA_DESC_DONE_MASK BIT(0)
/* INFO */
-#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28)
+#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 29)
#define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26)
#define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21)
#define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16)
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index 736f53018017..bda3c528b515 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -126,6 +126,13 @@ int apple_rtkit_wake(struct apple_rtkit *rtk);
int apple_rtkit_shutdown(struct apple_rtkit *rtk);
/*
+ * Put the co-processor into the lowest power state. Note that it is
+ * usually not possible to recover from this state without a full SoC reset.
+ */
+
+int apple_rtkit_poweroff(struct apple_rtkit *rtk);
+
+/*
* Put the co-processor into idle mode
*/
int apple_rtkit_idle(struct apple_rtkit *rtk);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 0c3906e8ad19..a06b5a61f337 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -23,6 +23,8 @@
#define CMDQ_THR_SPR_IDX2 (2)
#define CMDQ_THR_SPR_IDX3 (3)
+#define CMDQ_SUBSYS_INVALID (U8_MAX)
+
struct cmdq_pkt;
enum cmdq_logic_op {
@@ -52,8 +54,20 @@ struct cmdq_operand {
struct cmdq_client_reg {
u8 subsys;
+ phys_addr_t pa_base;
u16 offset;
u16 size;
+
+ /*
+ * Clients only use these functions for MMIO access, so they do not
+ * need to handle the mminfra_offset. The mminfra_offset is used for
+ * DRAM access and is handled internally by the CMDQ APIs.
+ */
+ int (*pkt_write)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value);
+ int (*pkt_write_mask)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value, u32 mask);
};
struct cmdq_client {
@@ -122,6 +136,32 @@ void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
/**
+ * cmdq_pkt_write_pa() - append write command to the CMDQ packet with pa_base
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_write_subsys() - append write command to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value);
+
+/**
* cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
@@ -134,6 +174,34 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+/**
+ * cmdq_pkt_write_mask_pa() - append write command with mask to the CMDQ packet with pa
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_write_mask_subsys() - append write command with mask to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value, u32 mask);
+
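+/*
+ * One plausible usage sketch (not taken from an existing driver; err, val
+ * and cl_reg are hypothetical): use the _subsys variant when a valid subsys
+ * code is available and fall back to the _pa variant otherwise:
+ *
+ *	if (cl_reg->subsys != CMDQ_SUBSYS_INVALID)
+ *		err = cmdq_pkt_write_subsys(pkt, cl_reg->subsys, 0,
+ *					    cl_reg->offset, val);
+ *	else
+ *		err = cmdq_pkt_write_pa(pkt, 0, cl_reg->pa_base,
+ *					cl_reg->offset, val);
+ */
+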
/*
* cmdq_pkt_read_s() - append read_s command to the CMDQ packet
* @pkt: the CMDQ packet
@@ -418,12 +486,37 @@ static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u3
return -ENOENT;
}
+static inline int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
return -ENOENT;
}
+static inline int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset,
+ u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 reg_idx)
{
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 0287f9182c4d..8243ab3a12a8 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -74,13 +74,17 @@
#define LLCC_CAMSRTIP 73
#define LLCC_CAMRTRF 74
#define LLCC_CAMSRTRF 75
+#define LLCC_OOBM_NS 81
+#define LLCC_OOBM_S 82
#define LLCC_VIDEO_APV 83
#define LLCC_COMPUTE1 87
#define LLCC_CPUSS_OPP 88
#define LLCC_CPUSSMPAM 89
+#define LLCC_VIDSC_VSP1 91
#define LLCC_CAM_IPE_STROV 92
#define LLCC_CAM_OFE_STROV 93
#define LLCC_CPUSS_HEU 94
+#define LLCC_PCIE_TCU 97
#define LLCC_MDM_PNG_FIXED 100
/**
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 8ea8230579a2..82372e0db0a1 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,19 +10,19 @@
struct device;
struct firmware;
-struct qcom_scm_pas_metadata;
+struct qcom_scm_pas_context;
#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
-int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
+int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx, const struct firmware *fw,
+ const char *firmware, void *mem_region, phys_addr_t *reloc_base);
+
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@@ -37,13 +37,6 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
return -ENODEV;
}
-static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx)
-{
- return -ENODEV;
-}
-
static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id,
void *mem_region, phys_addr_t mem_phys,
@@ -52,6 +45,13 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
return -ENODEV;
}
+static inline int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx,
+ const struct firmware *fw, const char *firmware,
+ void *mem_region, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
const char *fw_name, void *mem_region,
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
index 0a4edfe3d96d..f052e241736c 100644
--- a/include/linux/soc/qcom/ubwc.h
+++ b/include/linux/soc/qcom/ubwc.h
@@ -8,6 +8,7 @@
#define __QCOM_UBWC_H__
#include <linux/bits.h>
+#include <linux/printk.h>
#include <linux/types.h>
struct qcom_ubwc_cfg_data {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d3561c4a080e..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -212,7 +212,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) _raw_spin_trylock(lock)
#define raw_spin_lock(lock) _raw_spin_lock(lock)
@@ -283,22 +283,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define raw_spin_trylock_bh(lock) \
- __cond_lock(lock, _raw_spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) _raw_spin_trylock_bh(lock)
-#define raw_spin_trylock_irq(lock) \
-({ \
- local_irq_disable(); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_enable(); 0; }); \
-})
+#define raw_spin_trylock_irq(lock) _raw_spin_trylock_irq(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define raw_spin_trylock_irqsave(lock, flags) _raw_spin_trylock_irqsave(lock, &(flags))
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
@@ -347,16 +336,19 @@ do { \
#endif
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock(&lock->rlock);
}
@@ -364,14 +356,17 @@ static __always_inline int spin_trylock(spinlock_t *lock)
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -379,47 +374,57 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_irq(&lock->rlock);
}
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
/**
* spin_is_locked() - Check whether a spinlock is locked.
@@ -497,23 +502,17 @@ static inline int rwlock_needbreak(rwlock_t *lock)
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))
-extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
-#define atomic_dec_and_raw_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
@@ -535,86 +534,144 @@ void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_nested_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
raw_spin_lock_bh(_T->lock),
raw_spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
spin_lock_bh(_T->lock),
spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
+
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 9ecb0ab504e3..bda5e7a390cd 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -34,8 +34,8 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) __cond_acquires(true, lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
@@ -84,6 +84,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
@@ -94,6 +95,26 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return 0;
}
+static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ local_irq_disable();
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_enable();
+ return false;
+}
+
+static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ local_irq_save(*flags);
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
@@ -102,6 +123,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -113,6 +135,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -121,6 +144,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -128,6 +152,7 @@ static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -137,6 +162,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -145,6 +171,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -153,6 +180,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
}
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -161,6 +189,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -168,6 +197,7 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 819aeba1c87e..a9d5c7c66e03 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,68 +24,120 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
-#define ___LOCK(lock) \
+#define ___LOCK_(lock) \
do { __acquire(lock); (void)(lock); } while (0)
-#define __LOCK(lock) \
- do { preempt_disable(); ___LOCK(lock); } while (0)
+#define ___LOCK_shared(lock) \
+ do { __acquire_shared(lock); (void)(lock); } while (0)
-#define __LOCK_BH(lock) \
- do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
+#define __LOCK(lock, ...) \
+ do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQ(lock) \
- do { local_irq_disable(); __LOCK(lock); } while (0)
+#define __LOCK_BH(lock, ...) \
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQSAVE(lock, flags) \
- do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define __LOCK_IRQ(lock, ...) \
+ do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0)
-#define ___UNLOCK(lock) \
+#define __LOCK_IRQSAVE(lock, flags, ...) \
+ do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0)
+
+#define ___UNLOCK_(lock) \
do { __release(lock); (void)(lock); } while (0)
-#define __UNLOCK(lock) \
- do { preempt_enable(); ___UNLOCK(lock); } while (0)
+#define ___UNLOCK_shared(lock) \
+ do { __release_shared(lock); (void)(lock); } while (0)
+
+#define __UNLOCK(lock, ...) \
+ do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_BH(lock) \
+#define __UNLOCK_BH(lock, ...) \
do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
- ___UNLOCK(lock); } while (0)
+ ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_IRQ(lock) \
- do { local_irq_enable(); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQ(lock, ...) \
+ do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
-#define __UNLOCK_IRQRESTORE(lock, flags) \
- do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQRESTORE(lock, flags, ...) \
+ do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock, shared)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
-#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock, shared)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock, shared)
#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags, shared)
#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+
+static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_BH(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQ(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
+static __always_inline int _raw_read_trylock(rwlock_t *lock)
+ __cond_acquires_shared(true, lock)
+{
+ __LOCK(lock, shared);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock(rwlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
#define _raw_spin_unlock(lock) __UNLOCK(lock)
-#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock, shared)
#define _raw_write_unlock(lock) __UNLOCK(lock)
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock, shared)
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock, shared)
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
#define _raw_read_unlock_irqrestore(lock, flags) \
- __UNLOCK_IRQRESTORE(lock, flags)
+ __UNLOCK_IRQRESTORE(lock, flags, shared)
#define _raw_write_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f6499c37157d..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -36,10 +36,11 @@ extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock)
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
-extern int rt_spin_trylock_bh(spinlock_t *lock);
-extern int rt_spin_trylock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
+extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -82,6 +83,7 @@ static __always_inline void spin_lock(spinlock_t *lock)
__spin_lock_irqsave_nested(lock, flags, subclass)
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock)
{
/* Investigate: Drop bh when blocking ? */
local_bh_disable();
@@ -89,6 +91,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
}
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -101,45 +104,44 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
local_bh_enable();
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
-#define spin_trylock(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock(lock) rt_spin_trylock(lock)
-#define spin_trylock_bh(lock) \
- __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock) rt_spin_trylock_bh(lock)
-#define spin_trylock_irq(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) rt_spin_trylock(lock)
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = spin_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ *flags = 0;
+ return rt_spin_trylock(lock);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
#define spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 2dfa35ffec76..b65bb6e4451c 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -14,7 +14,7 @@
#ifndef CONFIG_PREEMPT_RT
/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
-typedef struct spinlock {
+context_lock_struct(spinlock) {
union {
struct raw_spinlock rlock;
@@ -26,7 +26,8 @@ typedef struct spinlock {
};
#endif
};
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define ___SPIN_LOCK_INITIALIZER(lockname) \
{ \
@@ -47,12 +48,13 @@ typedef struct spinlock {
/* PREEMPT_RT kernels map spinlock to rt_mutex */
#include <linux/rtmutex.h>
-typedef struct spinlock {
+context_lock_struct(spinlock) {
struct rt_mutex_base lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define __SPIN_LOCK_UNLOCKED(name) \
{ \
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index 91cb36b65a17..e5644ab2161f 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -11,7 +11,7 @@
#include <linux/lockdep_types.h>
-typedef struct raw_spinlock {
+context_lock_struct(raw_spinlock) {
arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -20,7 +20,8 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} raw_spinlock_t;
+};
+typedef struct raw_spinlock raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 344ad51c8f6c..bb44a0bd7696 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -21,7 +21,7 @@
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>
-struct srcu_struct;
+context_lock_struct(srcu_struct, __reentrant_ctx_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -77,7 +77,7 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
@@ -131,14 +131,16 @@ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned
}
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, idx);
}
@@ -210,6 +212,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/*
+ * No-op helper to denote that ssp must be held. Because SRCU-protected pointers
+ * should still be marked with __rcu_guarded, and we do not want to mark them
+ * with __guarded_by(ssp) as it would complicate annotations for writers, we
+ * choose the following strategy: srcu_dereference_check() calls this helper
+ * that checks that the passed ssp is held, and then fake-acquires 'RCU'.
+ */
+static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { }
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -223,9 +233,15 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
- (c) || srcu_read_lock_held(ssp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+({ \
+ __srcu_read_lock_must_hold(ssp); \
+ __acquire_shared_ctx_lock(RCU); \
+ __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu); \
+ __release_shared_ctx_lock(RCU); \
+ __v; \
+})
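+
+/*
+ * A minimal usage sketch (gp, gp->p and gp->lock are hypothetical):
+ *
+ *	int idx = srcu_read_lock(&gp->srcu);
+ *	struct foo *p = srcu_dereference_check(gp->p, &gp->srcu,
+ *					       lockdep_is_held(&gp->lock));
+ *	...
+ *	srcu_read_unlock(&gp->srcu, idx);
+ */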
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -268,7 +284,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
* from another.
*/
-static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -304,7 +321,8 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
* contexts where RCU is watching, that is, from contexts where it would
* be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
-static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -344,7 +362,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *
* complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
-__acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -360,7 +378,7 @@ __acquires(ssp)
* See srcu_read_lock_fast() for more information.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
- __acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -381,7 +399,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
* and srcu_read_lock_fast(). However, the same definition/initialization
* requirements called out for srcu_read_lock_safe() apply.
*/
-static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
@@ -400,7 +418,8 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
* then none of the other flavors may be used, whether before, during,
* or after.
*/
-static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -412,7 +431,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
+srcu_read_lock_notrace(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -443,7 +463,8 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
* which calls to down_read() may be nested. The same srcu_struct may be
* used concurrently by srcu_down_read() and srcu_read_lock().
*/
-static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_down_read(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -458,7 +479,7 @@ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
* Exit an SRCU read-side critical section.
*/
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -474,7 +495,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
* Exit a light-weight SRCU read-side critical section.
*/
static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
srcu_lock_release(&ssp->dep_map);
@@ -490,7 +511,7 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
* Exit an SRCU-fast-updown read-side critical section.
*/
static inline void
-srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
srcu_lock_release(&ssp->dep_map);
@@ -504,7 +525,7 @@ srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *
* See srcu_read_unlock_fast() for more information.
*/
static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
- struct srcu_ctr __percpu *scp) __releases(ssp)
+ struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
@@ -519,7 +540,7 @@ static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
 * the same context as the matching srcu_down_read_fast().
*/
static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
@@ -535,7 +556,7 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
@@ -545,7 +566,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
@@ -560,7 +581,7 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 * the same context as the matching srcu_down_read().
*/
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
@@ -600,15 +621,21 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T)
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T)
DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
_T->scp = srcu_read_lock_fast_notrace(_T->lock),
srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T)
#endif
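
As a quick illustration of how the guard classes declared above are meant to be used (a sketch only, not part of the patch; struct foo, my_ptr and my_srcu are made-up names), a scoped SRCU reader looks like this:

#include <linux/srcu.h>
#include <linux/cleanup.h>
#include <linux/errno.h>

struct foo {
	int value;
};

static struct foo __rcu *my_ptr;
DEFINE_SRCU(my_srcu);

static int read_my_value(void)
{
	struct foo *p;

	/* srcu_read_lock() now, srcu_read_unlock() at end of scope. */
	guard(srcu)(&my_srcu);
	p = srcu_dereference(my_ptr, &my_srcu);
	return p ? p->value : -ENOENT;
}

The srcu_fast and srcu_fast_notrace classes are used the same way, e.g. via scoped_guard(srcu_fast, &my_srcu).
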
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index e0698024667a..dec7cbe015aa 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -73,6 +73,7 @@ void synchronize_srcu(struct srcu_struct *ssp);
* index that must be passed to the matching srcu_read_unlock().
*/
static inline int __srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int idx;
@@ -80,6 +81,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
+ __acquire_shared(ssp);
return idx;
}
@@ -96,22 +98,26 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline
void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..958cb7ef41cb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -233,7 +233,7 @@ struct srcu_struct {
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
__DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_expedite_current(struct srcu_struct *ssp);
@@ -286,6 +286,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
* implementations of this_cpu_inc().
*/
static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -294,6 +295,7 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -308,7 +310,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
*/
static inline void notrace
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
@@ -326,6 +330,7 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
*/
static inline
struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -334,6 +339,7 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -348,7 +354,9 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
*/
static inline void notrace
__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index cf84d98964b2..02bd6ddb6278 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -59,7 +59,6 @@ struct compat_stat;
struct old_timeval32;
struct robust_list_head;
struct futex_waitv;
-struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
@@ -718,7 +717,7 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
@@ -961,6 +960,7 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
+asmlinkage long sys_rseq_slice_yield(void);
asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_open_tree_attr(int dfd, const char __user *path,
unsigned flags,
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index b449665c686a..5226efde9ad4 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -8,9 +8,11 @@
*/
#include <linux/err.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
#include <linux/types.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/edid.h>
struct device;
struct platform_device;
@@ -60,6 +62,16 @@ struct efifb_dmi_info {
int flags;
};
+struct sysfb_display_info {
+ struct screen_info screen;
+
+#if defined(CONFIG_FIRMWARE_EDID)
+ struct edid_info edid;
+#endif
+};
+
+extern struct sysfb_display_info sysfb_primary_display;
+
#ifdef CONFIG_SYSFB
void sysfb_disable(struct device *dev);
@@ -82,16 +94,17 @@ static inline bool sysfb_handles_screen_info(void)
#ifdef CONFIG_EFI
extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(void);
-void sysfb_set_efifb_fwnode(struct platform_device *pd);
+void sysfb_apply_efi_quirks(struct screen_info *si);
+void sysfb_set_efifb_fwnode(const struct screen_info *si, struct platform_device *pd);
#else /* CONFIG_EFI */
-static inline void sysfb_apply_efi_quirks(void)
+static inline void sysfb_apply_efi_quirks(struct screen_info *si)
{
}
-static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+static inline void sysfb_set_efifb_fwnode(const struct screen_info *si,
+ struct platform_device *pd)
{
}
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index 1f3e5dad6d0d..ee5f0bd41f43 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -76,6 +76,9 @@ struct tee_device {
/**
* struct tee_driver_ops - driver operations vtable
* @get_version: returns version of driver
+ * @get_tee_revision: returns revision string (diagnostic only);
+ * do not infer feature support from this, use
+ * TEE_IOC_VERSION instead
* @open: called for a context when the device file is opened
* @close_context: called when the device file is closed
* @release: called to release the context
@@ -95,9 +98,12 @@ struct tee_device {
* client closes the device file, even if there are existing references to the
* context. The TEE driver can use @close_context to start cleaning up.
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
struct tee_ioctl_version_data *vers);
+ int (*get_tee_revision)(struct tee_device *teedev,
+ char *buf, size_t len);
int (*open)(struct tee_context *ctx);
void (*close_context)(struct tee_context *ctx);
void (*release)(struct tee_context *ctx);
@@ -123,6 +129,9 @@ struct tee_driver_ops {
int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
};
+/* Size for TEE revision string buffer used by get_tee_revision(). */
+#define TEE_REVISION_STR_SIZE 128
+
/**
* struct tee_desc - Describes the TEE driver to the subsystem
* @name: name of driver
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 88a6f9697c89..e561a26f537a 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -315,6 +315,9 @@ struct tee_client_device {
* @driver: driver structure
*/
struct tee_client_driver {
+ int (*probe)(struct tee_client_device *);
+ void (*remove)(struct tee_client_device *);
+ void (*shutdown)(struct tee_client_device *);
const struct tee_client_device_id *id_table;
struct device_driver driver;
};
@@ -322,4 +325,13 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of_const(d, struct tee_client_driver, driver)
+#define tee_client_driver_register(drv) \
+ __tee_client_driver_register(drv, THIS_MODULE)
+int __tee_client_driver_register(struct tee_client_driver *, struct module *);
+void tee_client_driver_unregister(struct tee_client_driver *);
+
+#define module_tee_client_driver(__tee_client_driver) \
+ module_driver(__tee_client_driver, tee_client_driver_register, \
+ tee_client_driver_unregister)
+
#endif /*__TEE_DRV_H*/
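
For illustration only (not part of this patch), a TEE client driver using the new probe/remove callbacks and module_tee_client_driver() might look like the sketch below; the driver name and TA UUID are made up:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int my_ta_probe(struct tee_client_device *tdev)
{
	dev_info(&tdev->dev, "trusted application found\n");
	return 0;
}

static void my_ta_remove(struct tee_client_device *tdev)
{
	/* release per-device resources here */
}

static const struct tee_client_device_id my_ta_ids[] = {
	/* made-up TA UUID */
	{ UUID_INIT(0x12345678, 0x0000, 0x0000,
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01) },
	{ }
};
MODULE_DEVICE_TABLE(tee, my_ta_ids);

static struct tee_client_driver my_ta_driver = {
	.probe		= my_ta_probe,
	.remove		= my_ta_remove,
	.id_table	= my_ta_ids,
	.driver		= {
		.name	= "my-ta",
	},
};
module_tee_client_driver(my_ta_driver);

MODULE_LICENSE("GPL");
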
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 6673e4d4ac2e..4933777404d6 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -35,6 +35,7 @@ struct ts_state
* @get_pattern: return head of pattern
* @get_pattern_len: return length of pattern
* @owner: module reference to algorithm
+ * @list: entry in the list of registered search algorithms
*/
struct ts_ops
{
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index b40de9bab4b7..051e42902690 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -46,15 +46,17 @@ enum syscall_work_bit {
SYSCALL_WORK_BIT_SYSCALL_AUDIT,
SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
+ SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE,
};
-#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
-#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
-#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
-#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
-#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
-#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
-#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SYSCALL_RSEQ_SLICE BIT(SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE)
#endif
#include <asm/thread_info.h>
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ac76ae9fa36d..738007d6f577 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -126,6 +126,7 @@ enum tick_dep_bits {
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
+extern bool tick_nohz_is_active(void);
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
@@ -142,6 +143,7 @@ extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
+static inline bool tick_nohz_is_active(void) { return false; }
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index dce03a5cafb7..7de6b350e559 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -115,6 +115,15 @@ extern void timecounter_init(struct timecounter *tc,
*/
extern u64 timecounter_read(struct timecounter *tc);
+/*
+ * Like cyclecounter_cyc2ns(), but used to compute a time that lies
+ * before the time counter's last update (i.e. a time in the past).
+ */
+static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
+{
+ return ((cycles * cc->mult) - frac) >> cc->shift;
+}
+
/**
* timecounter_cyc2time - convert a cycle counter to same
* time base as values returned by
@@ -131,7 +140,25 @@ extern u64 timecounter_read(struct timecounter *tc);
*
* Returns: cycle counter converted to nanoseconds since the initial time stamp
*/
-extern u64 timecounter_cyc2time(const struct timecounter *tc,
- u64 cycle_tstamp);
+static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
+{
+ const struct cyclecounter *cc = tc->cc;
+ u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
+ u64 nsec = tc->nsec, frac = tc->frac;
+
+ /*
+ * Instead of always treating cycle_tstamp as more recent than
+ * tc->cycle_last, detect when it is too far in the future and
+ * treat it as an old time stamp instead.
+ */
+ if (unlikely(delta > cc->mask / 2)) {
+ delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
+ nsec -= cc_cyc2ns_backwards(cc, delta, frac);
+ } else {
+ nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
+ }
+
+ return nsec;
+}
#endif
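
A minimal sketch (not from the patch) of how the now-inline timecounter_cyc2time() is typically used; the cyclecounter parameters and the read callback are made-up placeholders:

#include <linux/clocksource.h>
#include <linux/timecounter.h>
#include <linux/timekeeping.h>

static u64 my_hw_read(const struct cyclecounter *cc)
{
	return 0;	/* would read a free-running hardware counter */
}

static const struct cyclecounter my_cc = {
	.read	= my_hw_read,
	.mask	= CLOCKSOURCE_MASK(48),	/* made-up 48-bit counter */
	.mult	= 1 << 20,		/* made-up scaling factor */
	.shift	= 20,
};

static struct timecounter my_tc;

static void my_timecounter_start(void)
{
	timecounter_init(&my_tc, &my_cc, ktime_get_real_ns());
}

static u64 my_stamp_to_ns(u64 hw_stamp)
{
	/*
	 * Same time base as timecounter_read(); stamps slightly older
	 * than the last update take the backwards path added above.
	 */
	return timecounter_cyc2time(&my_tc, hw_stamp);
}
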
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index c52b862dad45..fa4654ffb621 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
/* Return @a with all but the lowest @size bytes cleared */
struct tnum tnum_cast(struct tnum a, u8 size);
+/* Swap the bytes of a tnum */
+struct tnum tnum_bswap16(struct tnum a);
+struct tnum tnum_bswap32(struct tnum a);
+struct tnum tnum_bswap64(struct tnum a);
+
/* Returns true if @a is a known constant */
static inline bool tnum_is_const(struct tnum a)
{
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index ae04054a1be3..e6ca052b2a85 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -34,6 +34,13 @@ enum {
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,
+ /* Internal event use recursion bits */
+ TRACE_INTERNAL_EVENT_BIT,
+ TRACE_INTERNAL_EVENT_NMI_BIT,
+ TRACE_INTERNAL_EVENT_IRQ_BIT,
+ TRACE_INTERNAL_EVENT_SIRQ_BIT,
+ TRACE_INTERNAL_EVENT_TRANSITION_BIT,
+
TRACE_BRANCH_BIT,
/*
* Abuse of the trace_recursion.
@@ -58,6 +65,8 @@ enum {
#define TRACE_LIST_START TRACE_INTERNAL_BIT
+#define TRACE_EVENT_START TRACE_INTERNAL_EVENT_BIT
+
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
/*
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index a3b7ab668eff..22e05b2aac69 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -123,7 +123,4 @@ int tsm_report_unregister(const struct tsm_report_ops *ops);
struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops);
void tsm_unregister(struct tsm_dev *tsm_dev);
struct tsm_dev *find_tsm_dev(int id);
-struct pci_ide;
-int tsm_ide_stream_register(struct pci_ide *ide);
-void tsm_ide_stream_unregister(struct pci_ide *ide);
#endif /* __TSM_H */
diff --git a/include/linux/types.h b/include/linux/types.h
index d4437e9c452c..d673747eda8a 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -171,6 +171,11 @@ typedef u64 phys_addr_t;
typedef u32 phys_addr_t;
#endif
+struct phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
typedef phys_addr_t resource_size_t;
/*
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5b127043a151..a9bc5b3067e3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -389,6 +389,9 @@ ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
size_t maxsize, unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0);
+ssize_t iov_iter_extract_bvecs(struct iov_iter *iter, struct bio_vec *bv,
+ size_t max_size, unsigned short *nr_vecs,
+ unsigned short max_vecs, iov_iter_extraction_t extraction_flags);
/**
* iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 18238dc8bfd3..334641e20fb1 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -3,7 +3,7 @@
* include/linux/uio_driver.h
*
* Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
- * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
* Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
*
diff --git a/include/linux/unwind_user.h b/include/linux/unwind_user.h
index 7f7282516bf5..64618618febd 100644
--- a/include/linux/unwind_user.h
+++ b/include/linux/unwind_user.h
@@ -5,8 +5,22 @@
#include <linux/unwind_user_types.h>
#include <asm/unwind_user.h>
-#ifndef ARCH_INIT_USER_FP_FRAME
- #define ARCH_INIT_USER_FP_FRAME
+#ifndef CONFIG_HAVE_UNWIND_USER_FP
+
+#define ARCH_INIT_USER_FP_FRAME(ws)
+
+#endif
+
+#ifndef ARCH_INIT_USER_FP_ENTRY_FRAME
+#define ARCH_INIT_USER_FP_ENTRY_FRAME(ws)
+#endif
+
+#ifndef unwind_user_at_function_start
+static inline bool unwind_user_at_function_start(struct pt_regs *regs)
+{
+ return false;
+}
+#define unwind_user_at_function_start unwind_user_at_function_start
#endif
int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries);
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index ee3d36eda45d..f548fea2adec 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -242,6 +242,7 @@ extern void arch_uprobe_clear_state(struct mm_struct *mm);
extern void arch_uprobe_init_state(struct mm_struct *mm);
extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
+extern unsigned long arch_uprobe_get_xol_area(void);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 59409c1fc3de..2f7bd2fdc616 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -75,4 +75,7 @@
/* short SET_ADDRESS request timeout */
#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+/* skip BOS descriptor request */
+#define USB_QUIRK_NO_BOS BIT(17)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 2eb528058d0d..71564868b8f6 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -119,7 +119,7 @@
* a fuss about it. This makes the programmer responsible for tagging
* the functions that can be garbage-collected.
*
- * With the macro it is possible to write the following:
+ * With the macro it is possible to write the following::
*
* static int foo_suspend(struct device *dev)
* {
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3398a345bda8..1909b945b3ea 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -303,6 +303,7 @@ int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *));
+void vmstat_flush_workqueue(void);
#else /* CONFIG_SMP */
/*
@@ -403,6 +404,7 @@ static inline void __dec_node_page_state(struct page *page,
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
+static inline void vmstat_flush_workqueue(void) { }
static inline void drain_zonestat(struct zone *zone,
struct per_cpu_zonestat *pzstats) { }
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index dabc351cc127..a4749f56398f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -588,7 +588,7 @@ struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_housekeeping_update(const struct cpumask *hk);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 45ff6f7a872b..c47d4b9b88b3 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -44,7 +44,7 @@ struct ww_class {
unsigned int is_wait_die;
};
-struct ww_mutex {
+context_lock_struct(ww_mutex) {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
@@ -52,7 +52,7 @@ struct ww_mutex {
#endif
};
-struct ww_acquire_ctx {
+context_lock_struct(ww_acquire_ctx) {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
@@ -141,6 +141,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
+ __acquires(ctx) __no_context_analysis
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
@@ -179,6 +180,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ __releases(ctx) __acquires_shared(ctx) __no_context_analysis
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
@@ -196,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ __releases_shared(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
@@ -245,7 +248,8 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -278,7 +282,8 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
@@ -305,6 +310,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __acquires(lock) __must_hold(ctx) __no_context_analysis
{
int ret;
#ifdef DEBUG_WW_MUTEXES
@@ -342,6 +348,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
@@ -349,10 +356,11 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
return ww_mutex_lock_interruptible(lock, ctx);
}
-extern void ww_mutex_unlock(struct ww_mutex *lock);
+extern void ww_mutex_unlock(struct ww_mutex *lock) __releases(lock);
extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(true, lock) __must_hold(ctx);
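
For context, a condensed sketch of the wound/wait locking pattern that these annotations describe (made-up caller; the -EDEADLK backoff via ww_mutex_lock_slow() is omitted for brevity):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

static int update_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto out;
	ret = ww_mutex_lock(b, &ctx);
	if (ret) {
		ww_mutex_unlock(a);
		goto out;
	}
	ww_acquire_done(&ctx);	/* all locks of this transaction are held */

	/* ... modify the objects protected by a and b ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
out:
	ww_acquire_fini(&ctx);
	return ret;
}
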
/***
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -363,6 +371,7 @@ extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
+ __must_not_hold(lock)
{
#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 64e9afe7d647..296b5ee5c979 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -114,7 +114,7 @@ struct simple_xattr {
struct rb_node rb_node;
char *name;
size_t size;
- char value[];
+ char value[] __counted_by(size);
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 6f7a58350441..54c83b18d555 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -663,7 +663,22 @@ void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
struct video_device;
/* names for fancy debug output */
+
+/**
+ * var v4l2_field_names - Helper array mapping ``V4L2_FIELD_*`` to strings.
+ *
+ * Especially when printing debug messages, it is useful to output
+ * the field order of a V4L2 buffer. This array maps every possible
+ * value of the V4L2 field format to a human-readable string.
+ */
extern const char *v4l2_field_names[];
+
+/**
+ * var v4l2_type_names - Helper array mapping ``V4L2_BUF_TYPE_*`` to strings.
+ *
+ * When printing debug messages, it is useful to output the V4L2 buffer
+ * type as a name that describes its content rather than a raw number.
+ */
extern const char *v4l2_type_names[];
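
A small sketch (not part of the patch) of the debug output these arrays are meant for; fmt and buf stand in for the v4l2_format/v4l2_buffer a driver already has at hand:

#include <linux/printk.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

static void debug_print_buffer(const struct v4l2_format *fmt,
			       const struct v4l2_buffer *buf)
{
	pr_debug("queueing %s buffer, field order %s\n",
		 v4l2_type_names[buf->type],
		 v4l2_field_names[fmt->fmt.pix.field]);
}
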
#ifdef CONFIG_COMPAT
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 49edc7da0586..462078403557 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -521,13 +521,14 @@ static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
struct slave *slave)
{
+ unsigned long tmp, ret = READ_ONCE(slave->target_last_arp_rx[0]);
int i = 1;
- unsigned long ret = slave->target_last_arp_rx[0];
-
- for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
- if (time_before(slave->target_last_arp_rx[i], ret))
- ret = slave->target_last_arp_rx[i];
+ for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) {
+ tmp = READ_ONCE(slave->target_last_arp_rx[i]);
+ if (time_before(tmp, ret))
+ ret = tmp;
+ }
return ret;
}
@@ -537,7 +538,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
return slave_oldest_target_arp_rx(bond, slave);
- return slave->last_rx;
+ return READ_ONCE(slave->last_rx);
}
static inline void slave_update_last_tx(struct slave *slave)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 899f267b7cf9..2900202588a5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3221,8 +3221,6 @@ struct cfg80211_auth_request {
* if this is %NULL for a link, that link is not requested
* @elems: extra elements for the per-STA profile for this link
* @elems_len: length of the elements
- * @disabled: If set this link should be included during association etc. but it
- * should not be used until enabled by the AP MLD.
* @error: per-link error code, must be <= 0. If there is an error, then the
* operation as a whole must fail.
*/
@@ -3230,7 +3228,6 @@ struct cfg80211_assoc_link {
struct cfg80211_bss *bss;
const u8 *elems;
size_t elems_len;
- bool disabled;
int error;
};
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 58d91ccc56e0..a7b7abd66e21 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -67,6 +67,7 @@
FN(TC_EGRESS) \
FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
+ FN(QDISC_BURST_DROP) \
FN(QDISC_OVERLIMIT) \
FN(QDISC_CONGESTED) \
FN(CAKE_FLOOD) \
@@ -375,6 +376,11 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_QDISC_DROP,
/**
+ * @SKB_DROP_REASON_QDISC_BURST_DROP: dropped when net.core.qdisc_max_burst
+ * limit is hit.
+ */
+ SKB_DROP_REASON_QDISC_BURST_DROP,
+ /**
* @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
* instance exceeds its total buffer size limit.
*/
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 4acec191c54a..6632b1aa7584 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -42,6 +42,7 @@ struct net_hotdata {
int netdev_budget_usecs;
int tstamp_prequeue;
int max_backlog;
+ int qdisc_max_burst;
int dev_tx_weight;
int dev_rx_weight;
int sysctl_max_skb_frags;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index ecae35512b9b..4021e6a73e32 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -19,6 +19,7 @@
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>
+#include <net/netdev_lock.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -372,7 +373,17 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->flowi4_flags = flow_flags;
}
-int ip_tunnel_init(struct net_device *dev);
+int __ip_tunnel_init(struct net_device *dev);
+#define ip_tunnel_init(DEV) \
+({ \
+ struct net_device *__dev = (DEV); \
+ int __res = __ip_tunnel_init(__dev); \
+ \
+ if (!__res) \
+ netdev_lockdep_set_classes(__dev);\
+ __res; \
+})
+
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 127e6c7d910d..c54df042db6b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -219,6 +219,8 @@ static inline void nfc_free_device(struct nfc_dev *dev)
int nfc_register_device(struct nfc_dev *dev);
+void nfc_unregister_rfkill(struct nfc_dev *dev);
+void nfc_remove_device(struct nfc_dev *dev);
void nfc_unregister_device(struct nfc_dev *dev);
/**
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 1ae08e81339f..15679be90c5c 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -41,6 +41,12 @@ struct scsi_eh_save {
unsigned char cmnd[32];
struct scsi_data_buffer sdb;
struct scatterlist sense_sgl;
+
+ /* struct request fields */
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *rq_crypt_ctx;
+ struct blk_crypto_keyslot *rq_crypt_keyslot;
+#endif
};
extern void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd,
diff --git a/include/soc/spacemit/ccu.h b/include/soc/spacemit/ccu.h
new file mode 100644
index 000000000000..84dcdecccc05
--- /dev/null
+++ b/include/soc/spacemit/ccu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_SPACEMIT_CCU_H__
+#define __SOC_SPACEMIT_CCU_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/regmap.h>
+
+/* Auxiliary device used to represent a CCU reset controller */
+struct spacemit_ccu_adev {
+ struct auxiliary_device adev;
+ struct regmap *regmap;
+};
+
+static inline struct spacemit_ccu_adev *
+to_spacemit_ccu_adev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct spacemit_ccu_adev, adev);
+}
+
+#endif /* __SOC_SPACEMIT_CCU_H__ */
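
For illustration (not part of the patch), a reset-controller auxiliary driver could fetch the shared regmap from its parent CCU like this; the match string and probe body are placeholders:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <soc/spacemit/ccu.h>

static int spacemit_reset_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
{
	struct spacemit_ccu_adev *ccu_adev = to_spacemit_ccu_adev(adev);
	struct regmap *regmap = ccu_adev->regmap;

	/* ... register a reset controller backed by @regmap ... */
	return regmap ? 0 : -ENODEV;
}

static const struct auxiliary_device_id spacemit_reset_ids[] = {
	{ .name = "spacemit_ccu_k1.reset" },	/* made-up match string */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, spacemit_reset_ids);

static struct auxiliary_driver spacemit_reset_driver = {
	.probe		= spacemit_reset_probe,
	.id_table	= spacemit_reset_ids,
};
module_auxiliary_driver(spacemit_reset_driver);
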
diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h
index 354751562c55..0be7a2e8d445 100644
--- a/include/soc/spacemit/k1-syscon.h
+++ b/include/soc/spacemit/k1-syscon.h
@@ -5,17 +5,7 @@
#ifndef __SOC_K1_SYSCON_H__
#define __SOC_K1_SYSCON_H__
-/* Auxiliary device used to represent a CCU reset controller */
-struct spacemit_ccu_adev {
- struct auxiliary_device adev;
- struct regmap *regmap;
-};
-
-static inline struct spacemit_ccu_adev *
-to_spacemit_ccu_adev(struct auxiliary_device *adev)
-{
- return container_of(adev, struct spacemit_ccu_adev, adev);
-}
+#include "ccu.h"
/* APBS register offset */
#define APBS_PLL1_SWCR1 0x100
diff --git a/include/soc/spacemit/k3-syscon.h b/include/soc/spacemit/k3-syscon.h
new file mode 100644
index 000000000000..0299bea065a0
--- /dev/null
+++ b/include/soc/spacemit/k3-syscon.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* SpacemiT clock and reset driver definitions for the K3 SoC */
+
+#ifndef __SOC_K3_SYSCON_H__
+#define __SOC_K3_SYSCON_H__
+
+#include "ccu.h"
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+#define APBS_PLL4_SWCR1 0x130
+#define APBS_PLL4_SWCR2 0x134
+#define APBS_PLL4_SWCR3 0x138
+#define APBS_PLL5_SWCR1 0x13c
+#define APBS_PLL5_SWCR2 0x140
+#define APBS_PLL5_SWCR3 0x144
+#define APBS_PLL6_SWCR1 0x148
+#define APBS_PLL6_SWCR2 0x14c
+#define APBS_PLL6_SWCR3 0x150
+#define APBS_PLL7_SWCR1 0x158
+#define APBS_PLL7_SWCR2 0x15c
+#define APBS_PLL7_SWCR3 0x160
+#define APBS_PLL8_SWCR1 0x180
+#define APBS_PLL8_SWCR2 0x184
+#define APBS_PLL8_SWCR3 0x188
+
+/* MPMU register offset */
+#define MPMU_FCCR 0x0008
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(24)
+#define POSR_PLL2_LOCK BIT(25)
+#define POSR_PLL3_LOCK BIT(26)
+#define POSR_PLL4_LOCK BIT(27)
+#define POSR_PLL5_LOCK BIT(28)
+#define POSR_PLL6_LOCK BIT(29)
+#define POSR_PLL7_LOCK BIT(30)
+#define POSR_PLL8_LOCK BIT(31)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+#define MPMU_I2S0_SYSCLK 0x1100
+#define MPMU_I2S2_SYSCLK 0x1104
+#define MPMU_I2S3_SYSCLK 0x1108
+#define MPMU_I2S4_SYSCLK 0x110c
+#define MPMU_I2S5_SYSCLK 0x1110
+#define MPMU_I2S_SYSCLK_CTRL 0x1114
+
+/* APBC register offset */
+#define APBC_UART0_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS0_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS1_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR0_CLK_RST 0x5c
+#define APBC_IR1_CLK_RST 0x1c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_SSPA2_CLK_RST 0x88
+#define APBC_SSPA3_CLK_RST 0x8c
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_CAN1_CLK_RST 0xa4
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+#define APBC_TIMERS2_CLK_RST 0x11c
+#define APBC_TIMERS3_CLK_RST 0x120
+#define APBC_TIMERS4_CLK_RST 0x124
+#define APBC_TIMERS5_CLK_RST 0x128
+#define APBC_TIMERS6_CLK_RST 0x12c
+#define APBC_TIMERS7_CLK_RST 0x130
+
+#define APBC_CAN2_CLK_RST 0x148
+#define APBC_CAN3_CLK_RST 0x14c
+#define APBC_CAN4_CLK_RST 0x150
+#define APBC_UART10_CLK_RST 0x154
+#define APBC_SSP0_CLK_RST 0x158
+#define APBC_SSP1_CLK_RST 0x15c
+#define APBC_SSPA4_CLK_RST 0x160
+#define APBC_SSPA5_CLK_RST 0x164
+
+/* APMU register offset */
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_PMU_CLK_GATE_CTRL 0x040
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_MCB_CLK_RES_CTRL 0x06c
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_DTC_CLK_RES_CTRL 0x0ac
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_UCIE_CTRL 0x11c
+#define APMU_RCPU_CLK_RES_CTRL 0x14c
+#define APMU_TOP_DCLK_CTRL 0x158
+#define APMU_LCD_EDP_CTRL 0x23c
+#define APMU_UFS_CLK_RES_CTRL 0x268
+#define APMU_LCD_CLK_RES_CTRL3 0x26c
+#define APMU_LCD_CLK_RES_CTRL4 0x270
+#define APMU_LCD_CLK_RES_CTRL5 0x274
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_CPU_C2_CLK_CTRL 0x394
+#define APMU_CPU_C3_CLK_CTRL 0x208
+#define APMU_PCIE_CLK_RES_CTRL_A 0x1f0
+#define APMU_PCIE_CLK_RES_CTRL_B 0x1c8
+#define APMU_PCIE_CLK_RES_CTRL_C 0x1d0
+#define APMU_PCIE_CLK_RES_CTRL_D 0x1e0
+#define APMU_PCIE_CLK_RES_CTRL_E 0x1e8
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+#define APMU_EMAC2_CLK_RES_CTRL 0x248
+#define APMU_ESPI_CLK_RES_CTRL 0x240
+#define APMU_SNR_ISIM_VCLK_CTRL 0x3f8
+
+/* DCIU register offsets */
+#define DCIU_DMASYS_CLK_EN 0x234
+#define DCIU_DMASYS_SDMA_CLK_EN 0x238
+#define DCIU_C2_TCM_PIPE_CLK 0x244
+#define DCIU_C3_TCM_PIPE_CLK 0x248
+
+#define DCIU_DMASYS_S0_RSTN 0x204
+#define DCIU_DMASYS_S1_RSTN 0x208
+#define DCIU_DMASYS_A0_RSTN 0x20C
+#define DCIU_DMASYS_A1_RSTN 0x210
+#define DCIU_DMASYS_A2_RSTN 0x214
+#define DCIU_DMASYS_A3_RSTN 0x218
+#define DCIU_DMASYS_A4_RSTN 0x21C
+#define DCIU_DMASYS_A5_RSTN 0x220
+#define DCIU_DMASYS_A6_RSTN 0x224
+#define DCIU_DMASYS_A7_RSTN 0x228
+#define DCIU_DMASYS_RSTN 0x22C
+#define DCIU_DMASYS_SDMA_RSTN 0x230
+
+/* RCPU SYSCTRL register offsets */
+#define RCPU_CAN_CLK_RST 0x4c
+#define RCPU_CAN1_CLK_RST 0xF0
+#define RCPU_CAN2_CLK_RST 0xF4
+#define RCPU_CAN3_CLK_RST 0xF8
+#define RCPU_CAN4_CLK_RST 0xFC
+#define RCPU_IRC_CLK_RST 0x48
+#define RCPU_IRC1_CLK_RST 0xEC
+#define RCPU_GMAC_CLK_RST 0xE4
+#define RCPU_ESPI_CLK_RST 0xDC
+#define RCPU_AUDIO_I2S0_SYS_CLK_CTRL 0x70
+#define RCPU_AUDIO_I2S1_SYS_CLK_CTRL 0x44
+
+/* RCPU UARTCTRL register offsets */
+#define RCPU1_UART0_CLK_RST 0x00
+#define RCPU1_UART1_CLK_RST 0x04
+#define RCPU1_UART2_CLK_RST 0x08
+#define RCPU1_UART3_CLK_RST 0x0c
+#define RCPU1_UART4_CLK_RST 0x10
+#define RCPU1_UART5_CLK_RST 0x14
+
+/* RCPU I2SCTRL register offsets */
+#define RCPU2_AUDIO_I2S0_TX_RX_CLK_CTRL 0x60
+#define RCPU2_AUDIO_I2S1_TX_RX_CLK_CTRL 0x64
+#define RCPU2_AUDIO_I2S2_TX_RX_CLK_CTRL 0x68
+#define RCPU2_AUDIO_I2S3_TX_RX_CLK_CTRL 0x6C
+
+#define RCPU2_AUDIO_I2S2_SYS_CLK_CTRL 0x44
+#define RCPU2_AUDIO_I2S3_SYS_CLK_CTRL 0x54
+
+/* RCPU SPICTRL register offsets */
+#define RCPU3_SSP0_CLK_RST 0x00
+#define RCPU3_SSP1_CLK_RST 0x04
+#define RCPU3_PWR_SSP_CLK_RST 0x08
+
+/* RCPU I2CCTRL register offsets */
+#define RCPU4_I2C0_CLK_RST 0x00
+#define RCPU4_I2C1_CLK_RST 0x04
+#define RCPU4_PWR_I2C_CLK_RST 0x08
+
+/* RPMU register offsets */
+#define RCPU5_AON_PER_CLK_RST_CTRL 0x2C
+#define RCPU5_TIMER1_CLK_RST 0x4C
+#define RCPU5_TIMER2_CLK_RST 0x70
+#define RCPU5_TIMER3_CLK_RST 0x78
+#define RCPU5_TIMER4_CLK_RST 0x7C
+#define RCPU5_GPIO_AND_EDGE_CLK_RST 0x74
+#define RCPU5_RCPU_BUS_CLK_CTRL 0xC0
+#define RCPU5_RT24_CORE0_CLK_CTRL 0xC4
+#define RCPU5_RT24_CORE1_CLK_CTRL 0xC8
+#define RCPU5_RT24_CORE0_SW_RESET 0xCC
+#define RCPU5_RT24_CORE1_SW_RESET 0xD0
+
+/* RCPU PWMCTRL register offsets */
+#define RCPU6_PWM0_CLK_RST 0x00
+#define RCPU6_PWM1_CLK_RST 0x04
+#define RCPU6_PWM2_CLK_RST 0x08
+#define RCPU6_PWM3_CLK_RST 0x0c
+#define RCPU6_PWM4_CLK_RST 0x10
+#define RCPU6_PWM5_CLK_RST 0x14
+#define RCPU6_PWM6_CLK_RST 0x18
+#define RCPU6_PWM7_CLK_RST 0x1c
+#define RCPU6_PWM8_CLK_RST 0x20
+#define RCPU6_PWM9_CLK_RST 0x24
+
+/* APBC2 SEC register offsets */
+#define APBC2_UART1_CLK_RST 0x00
+#define APBC2_SSP2_CLK_RST 0x04
+#define APBC2_TWSI3_CLK_RST 0x08
+#define APBC2_RTC_CLK_RST 0x0c
+#define APBC2_TIMERS_CLK_RST 0x10
+#define APBC2_GPIO_CLK_RST 0x1c
+
+#endif /* __SOC_K3_SYSCON_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index c545875d0ff1..1fd21be02577 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -16,6 +16,7 @@
struct clk;
struct reset_control;
+struct tegra_pmc;
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
@@ -149,11 +150,24 @@ enum tegra_io_pad {
};
#ifdef CONFIG_SOC_TEGRA_PMC
+struct tegra_pmc *devm_tegra_pmc_get(struct device *dev);
+
+int tegra_pmc_powergate_power_on(struct tegra_pmc *pmc, unsigned int id);
+int tegra_pmc_powergate_power_off(struct tegra_pmc *pmc, unsigned int id);
+int tegra_pmc_powergate_remove_clamping(struct tegra_pmc *pmc, unsigned int id);
+
+/* Must be called with clk disabled, and returns with clk enabled */
+int tegra_pmc_powergate_sequence_power_up(struct tegra_pmc *pmc,
+ unsigned int id, struct clk *clk,
+ struct reset_control *rst);
+int tegra_pmc_io_pad_power_enable(struct tegra_pmc *pmc, enum tegra_io_pad id);
+int tegra_pmc_io_pad_power_disable(struct tegra_pmc *pmc, enum tegra_io_pad id);
+
+/* legacy */
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
int tegra_powergate_remove_clamping(unsigned int id);
-/* Must be called with clk disabled, and returns with clk enabled */
int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst);
@@ -166,6 +180,50 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
bool tegra_pmc_core_domain_state_synced(void);
#else
+static inline struct tegra_pmc *devm_tegra_pmc_get(struct device *dev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int
+tegra_pmc_powergate_power_on(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_powergate_power_off(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_powergate_remove_clamping(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+/* Must be called with clk disabled, and returns with clk enabled */
+static inline int
+tegra_pmc_powergate_sequence_power_up(struct tegra_pmc *pmc, unsigned int id,
+ struct clk *clk,
+ struct reset_control *rst)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_io_pad_power_enable(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_io_pad_power_disable(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
static inline int tegra_powergate_power_on(unsigned int id)
{
return -ENOSYS;
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 58fd6e84f961..a7860c047503 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1402,7 +1402,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
+int snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 7e418f065b94..125bdc166bfe 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -224,7 +224,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
__entry->generation = BTRFS_I(inode)->generation;
__entry->last_trans = BTRFS_I(inode)->last_trans;
__entry->logged_trans = BTRFS_I(inode)->logged_trans;
- __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
+ __entry->root_objectid = BTRFS_I(inode)->root ?
+ btrfs_root_id(BTRFS_I(inode)->root) : 0;
),
TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu "
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index b3fef140ae15..33e99e792f1a 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -275,6 +275,8 @@ TRACE_EVENT(dma_free_sgt,
sizeof(u64), sizeof(u64)))
);
+#define DMA_TRACE_MAX_ENTRIES 128
+
TRACE_EVENT(dma_map_sg,
TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
int ents, enum dma_data_direction dir, unsigned long attrs),
@@ -282,9 +284,12 @@ TRACE_EVENT(dma_map_sg,
TP_STRUCT__entry(
__string(device, dev_name(dev))
- __dynamic_array(u64, phys_addrs, nents)
- __dynamic_array(u64, dma_addrs, ents)
- __dynamic_array(unsigned int, lengths, ents)
+ __field(int, full_nents)
+ __field(int, full_ents)
+ __field(bool, truncated)
+ __dynamic_array(u64, phys_addrs, min(nents, DMA_TRACE_MAX_ENTRIES))
+ __dynamic_array(u64, dma_addrs, min(ents, DMA_TRACE_MAX_ENTRIES))
+ __dynamic_array(unsigned int, lengths, min(ents, DMA_TRACE_MAX_ENTRIES))
__field(enum dma_data_direction, dir)
__field(unsigned long, attrs)
),
@@ -292,11 +297,16 @@ TRACE_EVENT(dma_map_sg,
TP_fast_assign(
struct scatterlist *sg;
int i;
+ int traced_nents = min_t(int, nents, DMA_TRACE_MAX_ENTRIES);
+ int traced_ents = min_t(int, ents, DMA_TRACE_MAX_ENTRIES);
__assign_str(device);
- for_each_sg(sgl, sg, nents, i)
+ __entry->full_nents = nents;
+ __entry->full_ents = ents;
+ __entry->truncated = (nents > DMA_TRACE_MAX_ENTRIES) || (ents > DMA_TRACE_MAX_ENTRIES);
+ for_each_sg(sgl, sg, traced_nents, i)
((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
- for_each_sg(sgl, sg, ents, i) {
+ for_each_sg(sgl, sg, traced_ents, i) {
((u64 *)__get_dynamic_array(dma_addrs))[i] =
sg_dma_address(sg);
((unsigned int *)__get_dynamic_array(lengths))[i] =
@@ -306,9 +316,12 @@ TRACE_EVENT(dma_map_sg,
__entry->attrs = attrs;
),
- TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ TP_printk("%s dir=%s nents=%d/%d ents=%d/%d%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
+ min_t(int, __entry->full_nents, DMA_TRACE_MAX_ENTRIES), __entry->full_nents,
+ min_t(int, __entry->full_ents, DMA_TRACE_MAX_ENTRIES), __entry->full_ents,
+ __entry->truncated ? " [TRUNCATED]" : "",
__print_array(__get_dynamic_array(dma_addrs),
__get_dynamic_array_len(dma_addrs) /
sizeof(u64), sizeof(u64)),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index dad7360f42f9..def20d06507b 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -82,9 +82,9 @@ TRACE_EVENT(erofs_fill_inode,
TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct folio *folio, bool raw),
+ TP_PROTO(struct inode *inode, struct folio *folio, bool raw),
- TP_ARGS(folio, raw),
+ TP_ARGS(inode, folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -96,9 +96,9 @@ TRACE_EVENT(erofs_read_folio,
),
TP_fast_assign(
- __entry->dev = folio->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(folio->mapping->host)->nid;
- __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_I(inode)->nid;
+ __entry->dir = S_ISDIR(inode->i_mode);
__entry->index = folio->index;
__entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index de6f6d25767c..869f97c9bf73 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -322,6 +322,7 @@
EM(rxrpc_call_put_kernel, "PUT kernel ") \
EM(rxrpc_call_put_poke, "PUT poke ") \
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_recvmsg_peek_nowait, "PUT peek-nwt") \
EM(rxrpc_call_put_release_recvmsg_q, "PUT rls-rcmq") \
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
@@ -340,6 +341,9 @@
EM(rxrpc_call_see_input, "SEE input ") \
EM(rxrpc_call_see_notify_released, "SEE nfy-rlsd") \
EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_call_see_recvmsg_requeue, "SEE recv-rqu") \
+ EM(rxrpc_call_see_recvmsg_requeue_first, "SEE recv-rqF") \
+ EM(rxrpc_call_see_recvmsg_requeue_move, "SEE recv-rqM") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 311a341e6fe4..7162d03e69a5 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -856,12 +856,6 @@ DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
TP_ARGS(inode)
);
-DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
TP_PROTO(struct inode *inode),
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index c82233e950ac..a394b4d38e18 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -16,7 +16,6 @@ TRACE_DEFINE_ENUM(NFSERR_PERM);
TRACE_DEFINE_ENUM(NFSERR_NOENT);
TRACE_DEFINE_ENUM(NFSERR_IO);
TRACE_DEFINE_ENUM(NFSERR_NXIO);
-TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
TRACE_DEFINE_ENUM(NFSERR_ACCES);
TRACE_DEFINE_ENUM(NFSERR_EXIST);
TRACE_DEFINE_ENUM(NFSERR_XDEV);
@@ -52,7 +51,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
{ ETIMEDOUT, "TIMEDOUT" }, \
- { NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
{ NFSERR_XDEV, "XDEV" }, \
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index cf9c51ac49f9..92e7ae493ee3 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -55,6 +55,7 @@
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
+#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
@@ -98,6 +99,7 @@
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 942370b3f5d2..a627acc8fb5f 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -860,8 +860,11 @@ __SYSCALL(__NR_file_setattr, sys_file_setattr)
#define __NR_listns 470
__SYSCALL(__NR_listns, sys_listns)
+#define __NR_rseq_slice_yield 471
+__SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield)
+
#undef __NR_syscalls
-#define __NR_syscalls 471
+#define __NR_syscalls 472
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index e33f02703350..663836120966 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -81,7 +81,8 @@ enum blk_zone_cond {
BLK_ZONE_COND_FULL = 0xE,
BLK_ZONE_COND_OFFLINE = 0xF,
- BLK_ZONE_COND_ACTIVE = 0xFF,
+ BLK_ZONE_COND_ACTIVE = 0xFF, /* added in Linux 6.19 */
+#define BLK_ZONE_COND_ACTIVE BLK_ZONE_COND_ACTIVE
};
/**
@@ -100,7 +101,8 @@ enum blk_zone_report_flags {
BLK_ZONE_REP_CAPACITY = (1U << 0),
/* Input flags */
- BLK_ZONE_REP_CACHED = (1U << 31),
+ BLK_ZONE_REP_CACHED = (1U << 31), /* added in Linux 6.19 */
+#define BLK_ZONE_REP_CACHED BLK_ZONE_REP_CACHED
};
/**
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f8d8513eda27..c8d400b7680a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -119,6 +119,14 @@ enum bpf_cgroup_iter_order {
BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
+ /*
+ * Walks the immediate children of the specified parent
+ * cgroup_subsys_state. Unlike BPF_CGROUP_ITER_DESCENDANTS_PRE,
+ * BPF_CGROUP_ITER_DESCENDANTS_POST, and BPF_CGROUP_ITER_ANCESTORS_UP
+ * the iterator does not include the specified parent as one of the
+ * returned iterator elements.
+ */
+ BPF_CGROUP_ITER_CHILDREN,
};
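
Based on the comment above, a userspace sketch of requesting a children-only cgroup iterator; cgroup_fd would come from open()ing the parent cgroup directory (illustration only, field names taken from union bpf_iter_link_info):

#include <linux/bpf.h>
#include <string.h>

static void init_children_iter(union bpf_iter_link_info *linfo, int cgroup_fd)
{
	memset(linfo, 0, sizeof(*linfo));
	/* Walk only the immediate children; the parent itself is not returned. */
	linfo->cgroup.order = BPF_CGROUP_ITER_CHILDREN;
	linfo->cgroup.cgroup_fd = cgroup_fd;
}
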
union bpf_iter_link_info {
@@ -918,6 +926,16 @@ union bpf_iter_link_info {
* Number of bytes read from the stream on success, or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_PROG_ASSOC_STRUCT_OPS
+ * Description
+ * Associate a BPF program with a struct_ops map. The struct_ops
+ * map is identified by *map_fd* and the BPF program is
+ * identified by *prog_fd*.
+ *
+ * Return
+ * 0 on success or -1 if an error occurred (in which case,
+ * *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -974,6 +992,7 @@ enum bpf_cmd {
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
BPF_PROG_STREAM_READ_BY_FD,
+ BPF_PROG_ASSOC_STRUCT_OPS,
__MAX_BPF_CMD,
};
@@ -1134,6 +1153,7 @@ enum bpf_attach_type {
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
BPF_TRACE_UPROBE_SESSION,
+ BPF_TRACE_FSESSION,
__MAX_BPF_ATTACH_TYPE
};
@@ -1373,6 +1393,8 @@ enum {
BPF_NOEXIST = 1, /* create new element if it didn't exist */
BPF_EXIST = 2, /* update existing element */
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+ BPF_F_CPU = 8, /* cpu flag for percpu maps, the upper 32 bits of flags hold a cpu number */
+ BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
};
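An illustrative flag-encoding sketch (assumes libbpf's 64-bit flags argument to bpf_map_update_elem(); map_fd, key, value and cpu are placeholders):

/* Update the per-CPU value only on CPU 'cpu'. BPF_F_ALL_CPUS would instead
 * replicate one value to every CPU. Sketch only.
 */
__u64 flags = BPF_F_CPU | ((__u64)cpu << 32);

int err = bpf_map_update_elem(map_fd, &key, &value, flags);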
/* flags for BPF_MAP_CREATE command */
@@ -1894,6 +1916,12 @@ union bpf_attr {
__u32 prog_fd;
} prog_stream_read;
+ struct {
+ __u32 map_fd;
+ __u32 prog_fd;
+ __u32 flags;
+ } prog_assoc_struct_ops;
+
} __attribute__((aligned(8)));
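A hedged sketch of invoking the new command through the raw bpf(2) syscall (illustrative only; map_fd and prog_fd are assumed to refer to an existing struct_ops map and a loaded program):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_assoc_struct_ops(int map_fd, int prog_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_assoc_struct_ops.map_fd  = map_fd;
        attr.prog_assoc_struct_ops.prog_fd = prog_fd;
        attr.prog_assoc_struct_ops.flags   = 0;

        /* 0 on success, -1 with errno set on failure, as documented above. */
        return syscall(__NR_bpf, BPF_PROG_ASSOC_STRUCT_OPS, &attr, sizeof(attr));
}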
/* The description below is an attempt at providing documentation to eBPF
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index e8fd92789423..9165154a274d 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -336,6 +336,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
+#define BTRFS_FEATURE_INCOMPAT_REMAP_TREE (1ULL << 17)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index fc29d273845d..f7843e6bb978 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -76,6 +76,9 @@
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
+/* Holds details of remapped addresses after relocation. */
+#define BTRFS_REMAP_TREE_OBJECTID 13ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@@ -282,6 +285,10 @@
#define BTRFS_RAID_STRIPE_KEY 230
+#define BTRFS_IDENTITY_REMAP_KEY 234
+#define BTRFS_REMAP_KEY 235
+#define BTRFS_REMAP_BACKREF_KEY 236
+
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -714,9 +721,12 @@ struct btrfs_super_block {
__u8 metadata_uuid[BTRFS_FSID_SIZE];
__u64 nr_global_roots;
+ __le64 remap_root;
+ __le64 remap_root_generation;
+ __u8 remap_root_level;
/* Future expansion */
- __le64 reserved[27];
+ __u8 reserved[199];
__u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
@@ -1161,12 +1171,15 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
+#define BTRFS_BLOCK_GROUP_REMAPPED (1ULL << 11)
+#define BTRFS_BLOCK_GROUP_METADATA_REMAP (1ULL << 12)
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
- BTRFS_BLOCK_GROUP_METADATA)
+ BTRFS_BLOCK_GROUP_METADATA | \
+ BTRFS_BLOCK_GROUP_METADATA_REMAP)
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
@@ -1219,6 +1232,14 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
+struct btrfs_block_group_item_v2 {
+ __le64 used;
+ __le64 chunk_objectid;
+ __le64 flags;
+ __le64 remap_bytes;
+ __le32 identity_remap_count;
+} __attribute__ ((__packed__));
+
struct btrfs_free_space_info {
__le32 extent_count;
__le32 flags;
@@ -1323,4 +1344,13 @@ struct btrfs_verity_descriptor_item {
__u8 encryption;
} __attribute__ ((__packed__));
+/*
+ * For a range identified by a BTRFS_REMAP_KEY item in the remap tree, gives
+ * the address that the start of the range will get remapped to. This
+ * structure is also shared by BTRFS_REMAP_BACKREF_KEY.
+ */
+struct btrfs_remap_item {
+ __le64 address;
+} __attribute__ ((__packed__));
+
#endif /* _BTRFS_CTREE_H_ */
diff --git a/include/uapi/linux/comedi.h b/include/uapi/linux/comedi.h
index 7314e5ee0a1e..798ec9a39e12 100644
--- a/include/uapi/linux/comedi.h
+++ b/include/uapi/linux/comedi.h
@@ -640,7 +640,7 @@ struct comedi_chaninfo {
/**
* struct comedi_rangeinfo - used to retrieve the range table for a channel
- * @range_type: Encodes subdevice index (bits 27:24), channel index
+ * @range_type: Encodes subdevice index (bits 31:24), channel index
* (bits 23:16) and range table length (bits 15:0).
* @range_ptr: Pointer to array of @struct comedi_krange to be filled
* in with the range table for the channel or subdevice.
diff --git a/include/uapi/linux/dev_energymodel.h b/include/uapi/linux/dev_energymodel.h
new file mode 100644
index 000000000000..355d8885c9a0
--- /dev/null
+++ b/include/uapi/linux/dev_energymodel.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dev-energymodel.yaml */
+/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _UAPI_LINUX_DEV_ENERGYMODEL_H
+#define _UAPI_LINUX_DEV_ENERGYMODEL_H
+
+#define DEV_ENERGYMODEL_FAMILY_NAME "dev-energymodel"
+#define DEV_ENERGYMODEL_FAMILY_VERSION 1
+
+/**
+ * enum dev_energymodel_perf_state_flags
+ * @DEV_ENERGYMODEL_PERF_STATE_FLAGS_PERF_STATE_INEFFICIENT: The performance
+ * state is inefficient. There is, in this perf-domain, another performance
+ * state with a higher frequency but a lower or equal power cost.
+ */
+enum dev_energymodel_perf_state_flags {
+ DEV_ENERGYMODEL_PERF_STATE_FLAGS_PERF_STATE_INEFFICIENT = 1,
+};
+
+/**
+ * enum dev_energymodel_perf_domain_flags
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_MICROWATTS: The power values
+ * are in micro-Watts or some other scale.
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip
+ * inefficient states when estimating energy consumption.
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_ARTIFICIAL: The power values
+ * are artificial and might be created by a platform missing real power
+ * information.
+ */
+enum dev_energymodel_perf_domain_flags {
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_MICROWATTS = 1,
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_SKIP_INEFFICIENCIES = 2,
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_ARTIFICIAL = 4,
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_PAD = 1,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_FLAGS,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_CPUS,
+
+ __DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX = (__DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID = 1,
+ DEV_ENERGYMODEL_A_PERF_TABLE_PERF_STATE,
+
+ __DEV_ENERGYMODEL_A_PERF_TABLE_MAX,
+ DEV_ENERGYMODEL_A_PERF_TABLE_MAX = (__DEV_ENERGYMODEL_A_PERF_TABLE_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD = 1,
+ DEV_ENERGYMODEL_A_PERF_STATE_PERFORMANCE,
+ DEV_ENERGYMODEL_A_PERF_STATE_FREQUENCY,
+ DEV_ENERGYMODEL_A_PERF_STATE_POWER,
+ DEV_ENERGYMODEL_A_PERF_STATE_COST,
+ DEV_ENERGYMODEL_A_PERF_STATE_FLAGS,
+
+ __DEV_ENERGYMODEL_A_PERF_STATE_MAX,
+ DEV_ENERGYMODEL_A_PERF_STATE_MAX = (__DEV_ENERGYMODEL_A_PERF_STATE_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS = 1,
+ DEV_ENERGYMODEL_CMD_GET_PERF_TABLE,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_CREATED,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_UPDATED,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_DELETED,
+
+ __DEV_ENERGYMODEL_CMD_MAX,
+ DEV_ENERGYMODEL_CMD_MAX = (__DEV_ENERGYMODEL_CMD_MAX - 1)
+};
+
+#define DEV_ENERGYMODEL_MCGRP_EVENT "event"
+
+#endif /* _UAPI_LINUX_DEV_ENERGYMODEL_H */
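A hedged userspace sketch of resolving the new generic netlink family with libnl (illustrative; assumes this header is installed and error handling is omitted):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/dev_energymodel.h>

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, DEV_ENERGYMODEL_FAMILY_NAME);
        /* DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS etc. would then be sent to
         * 'family'; building and dumping attributes is omitted here. */
        nl_socket_free(sk);
        return family < 0 ? 1 : 0;
}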
diff --git a/include/uapi/linux/energy_model.h b/include/uapi/linux/energy_model.h
deleted file mode 100644
index 0bcad967854f..000000000000
--- a/include/uapi/linux/energy_model.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
-/* Do not edit directly, auto-generated from: */
-/* Documentation/netlink/specs/em.yaml */
-/* YNL-GEN uapi header */
-/* To regenerate run: tools/net/ynl/ynl-regen.sh */
-
-#ifndef _UAPI_LINUX_ENERGY_MODEL_H
-#define _UAPI_LINUX_ENERGY_MODEL_H
-
-#define EM_FAMILY_NAME "em"
-#define EM_FAMILY_VERSION 1
-
-enum {
- EM_A_PDS_PD = 1,
-
- __EM_A_PDS_MAX,
- EM_A_PDS_MAX = (__EM_A_PDS_MAX - 1)
-};
-
-enum {
- EM_A_PD_PAD = 1,
- EM_A_PD_PD_ID,
- EM_A_PD_FLAGS,
- EM_A_PD_CPUS,
-
- __EM_A_PD_MAX,
- EM_A_PD_MAX = (__EM_A_PD_MAX - 1)
-};
-
-enum {
- EM_A_PD_TABLE_PD_ID = 1,
- EM_A_PD_TABLE_PS,
-
- __EM_A_PD_TABLE_MAX,
- EM_A_PD_TABLE_MAX = (__EM_A_PD_TABLE_MAX - 1)
-};
-
-enum {
- EM_A_PS_PAD = 1,
- EM_A_PS_PERFORMANCE,
- EM_A_PS_FREQUENCY,
- EM_A_PS_POWER,
- EM_A_PS_COST,
- EM_A_PS_FLAGS,
-
- __EM_A_PS_MAX,
- EM_A_PS_MAX = (__EM_A_PS_MAX - 1)
-};
-
-enum {
- EM_CMD_GET_PDS = 1,
- EM_CMD_GET_PD_TABLE,
- EM_CMD_PD_CREATED,
- EM_CMD_PD_UPDATED,
- EM_CMD_PD_DELETED,
-
- __EM_CMD_MAX,
- EM_CMD_MAX = (__EM_CMD_MAX - 1)
-};
-
-#define EM_MCGRP_EVENT "event"
-
-#endif /* _UAPI_LINUX_ENERGY_MODEL_H */
diff --git a/include/uapi/linux/ext4.h b/include/uapi/linux/ext4.h
index 411dcc1e4a35..9c683991c32f 100644
--- a/include/uapi/linux/ext4.h
+++ b/include/uapi/linux/ext4.h
@@ -139,7 +139,7 @@ struct ext4_tune_sb_params {
__u32 clear_feature_incompat_mask;
__u32 clear_feature_ro_compat_mask;
__u8 mount_opts[64];
- __u8 pad[64];
+ __u8 pad[68];
};
#define EXT4_TUNE_FL_ERRORS_BEHAVIOR 0x00000001
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index b35871cbeed7..4f51e198ac2e 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -42,7 +42,7 @@ struct sockaddr_alg_new {
struct af_alg_iv {
__u32 ivlen;
- __u8 iv[];
+ __u8 iv[] __counted_by(ivlen);
};
/* Socket options */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index b5b23c0d5283..da5156954731 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -237,6 +237,18 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_SQE_MIXED (1U << 19)
+/*
+ * When set, io_uring ignores SQ head and tail and fetches SQEs to submit
+ * starting from index 0 instead of from the index stored in the head pointer.
+ * IOW, the user should place all SQEs at the beginning of the SQ memory
+ * before issuing a submission syscall.
+ *
+ * It requires IORING_SETUP_NO_SQARRAY and is incompatible with
+ * IORING_SETUP_SQPOLL. The user must also never change the SQ head and tail
+ * values and must keep them set to 0. Any other values are undefined behaviour.
+ */
+#define IORING_SETUP_SQ_REWIND (1U << 20)
+
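A minimal setup sketch using raw syscalls (assumption-laden; liburing support is not implied by this patch):

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int setup_rewind_ring(unsigned int entries)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        /* SQ_REWIND requires NO_SQARRAY and excludes SQPOLL, per the comment. */
        p.flags = IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQ_REWIND;

        /* SQEs must then always be written starting at SQ index 0, with the
         * SQ head/tail left at 0; the submit count goes to io_uring_enter(). */
        return syscall(__NR_io_uring_setup, entries, &p);
}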
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -700,6 +712,9 @@ enum io_uring_register_op {
/* auxiliary zcrx configuration, see enum zcrx_ctrl_op */
IORING_REGISTER_ZCRX_CTRL = 36,
+ /* register bpf filtering programs */
+ IORING_REGISTER_BPF_FILTER = 37,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -805,6 +820,13 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_task_restriction {
+ __u16 flags;
+ __u16 nr_res;
+ __u32 resv[3];
+ __DECLARE_FLEX_ARRAY(struct io_uring_restriction, restrictions);
+};
+
struct io_uring_clock_register {
__u32 clockid;
__u32 __resv[3];
diff --git a/include/uapi/linux/io_uring/bpf_filter.h b/include/uapi/linux/io_uring/bpf_filter.h
new file mode 100644
index 000000000000..220351b81bc0
--- /dev/null
+++ b/include/uapi/linux/io_uring/bpf_filter.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring BPF filters.
+ */
+#ifndef LINUX_IO_URING_BPF_FILTER_H
+#define LINUX_IO_URING_BPF_FILTER_H
+
+#include <linux/types.h>
+
+/*
+ * Struct passed to filters.
+ */
+struct io_uring_bpf_ctx {
+ __u64 user_data;
+ __u8 opcode;
+ __u8 sqe_flags;
+ __u8 pdu_size; /* size of aux data for filter */
+ __u8 pad[5];
+ union {
+ struct {
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+ } socket;
+ struct {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+ } open;
+ };
+};
+
+enum {
+ /*
+ * If set, any currently unset opcode will have a deny filter attached
+ */
+ IO_URING_BPF_FILTER_DENY_REST = 1,
+};
+
+struct io_uring_bpf_filter {
+ __u32 opcode; /* io_uring opcode to filter */
+ __u32 flags;
+ __u32 filter_len; /* number of BPF instructions */
+ __u32 resv;
+ __u64 filter_ptr; /* pointer to BPF filter */
+ __u64 resv2[5];
+};
+
+enum {
+ IO_URING_BPF_CMD_FILTER = 1,
+};
+
+struct io_uring_bpf {
+ __u16 cmd_type; /* IO_URING_BPF_* values */
+ __u16 cmd_flags; /* none so far */
+ __u32 resv;
+ union {
+ struct io_uring_bpf_filter filter;
+ };
+};
+
+#endif
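A hedged sketch of filling the registration structures above (the registration path is assumed to be io_uring_register(2) with the IORING_REGISTER_BPF_FILTER opcode added in this series; filter_insns and nr_insns stand in for an already-built BPF program and are hypothetical):

/* Sketch only: filter one opcode and deny every opcode without a filter. */
struct io_uring_bpf reg = {
        .cmd_type = IO_URING_BPF_CMD_FILTER,
        .filter = {
                .opcode     = IORING_OP_OPENAT,   /* opcode being filtered */
                .flags      = IO_URING_BPF_FILTER_DENY_REST,
                .filter_len = nr_insns,
                .filter_ptr = (__u64)(unsigned long)filter_insns,
        },
};
/* 'reg' would then be passed to the new IORING_REGISTER_BPF_FILTER
 * registration opcode. */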
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index dddb781b0507..88cca0e22ece 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -180,6 +180,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -402,7 +403,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index f030adc462ee..75fd7f5e6cc3 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -216,6 +216,23 @@ struct landlock_net_port_attr {
* :manpage:`ftruncate(2)`, :manpage:`creat(2)`, or :manpage:`open(2)` with
* ``O_TRUNC``. This access right is available since the third version of the
* Landlock ABI.
+ * - %LANDLOCK_ACCESS_FS_IOCTL_DEV: Invoke :manpage:`ioctl(2)` commands on an opened
+ * character or block device.
+ *
+ * This access right applies to all `ioctl(2)` commands implemented by device
+ * drivers. However, the following common IOCTL commands continue to be
+ * invokable independent of the %LANDLOCK_ACCESS_FS_IOCTL_DEV right:
+ *
+ * * IOCTL commands targeting file descriptors (``FIOCLEX``, ``FIONCLEX``),
+ * * IOCTL commands targeting file descriptions (``FIONBIO``, ``FIOASYNC``),
+ * * IOCTL commands targeting file systems (``FIFREEZE``, ``FITHAW``,
+ * ``FIGETBSZ``, ``FS_IOC_GETFSUUID``, ``FS_IOC_GETFSSYSFSPATH``)
+ * * Some IOCTL commands which do not make sense when used with devices, but
+ * whose implementations are safe and return the right error codes
+ * (``FS_IOC_FIEMAP``, ``FICLONE``, ``FICLONERANGE``, ``FIDEDUPERANGE``)
+ *
+ * This access right is available since the fifth version of the Landlock
+ * ABI.
*
* Whether an opened file can be truncated with :manpage:`ftruncate(2)` or used
* with `ioctl(2)` is determined during :manpage:`open(2)`, in the same way as
@@ -275,26 +292,6 @@ struct landlock_net_port_attr {
* If multiple requirements are not met, the ``EACCES`` error code takes
* precedence over ``EXDEV``.
*
- * The following access right applies both to files and directories:
- *
- * - %LANDLOCK_ACCESS_FS_IOCTL_DEV: Invoke :manpage:`ioctl(2)` commands on an opened
- * character or block device.
- *
- * This access right applies to all `ioctl(2)` commands implemented by device
- * drivers. However, the following common IOCTL commands continue to be
- * invokable independent of the %LANDLOCK_ACCESS_FS_IOCTL_DEV right:
- *
- * * IOCTL commands targeting file descriptors (``FIOCLEX``, ``FIONCLEX``),
- * * IOCTL commands targeting file descriptions (``FIONBIO``, ``FIOASYNC``),
- * * IOCTL commands targeting file systems (``FIFREEZE``, ``FITHAW``,
- * ``FIGETBSZ``, ``FS_IOC_GETFSUUID``, ``FS_IOC_GETFSSYSFSPATH``)
- * * Some IOCTL commands which do not make sense when used with devices, but
- * whose implementations are safe and return the right error codes
- * (``FS_IOC_FIEMAP``, ``FICLONE``, ``FICLONERANGE``, ``FIDEDUPERANGE``)
- *
- * This access right is available since the fifth version of the Landlock
- * ABI.
- *
* .. warning::
*
* It is currently not possible to restrict some file-related actions
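A short usage sketch for the relocated access right (the ruleset syscall itself is long-standing Landlock ABI; only the IOCTL_DEV bit comes from the documentation above):

#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

int deny_device_ioctls(void)
{
        /* Handling LANDLOCK_ACCESS_FS_IOCTL_DEV without granting it back via
         * rules denies ioctl(2) on character/block devices, except for the
         * always-allowed commands listed above. */
        struct landlock_ruleset_attr attr = {
                .handled_access_fs = LANDLOCK_ACCESS_FS_IOCTL_DEV,
        };

        return syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
}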
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 638ca21b7a90..4f2da935a76c 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -104,5 +104,6 @@
#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#define PID_FS_MAGIC 0x50494446 /* "PIDF" */
#define GUEST_MEMFD_MAGIC 0x474d454d /* "GMEM" */
+#define NULL_FS_MAGIC 0x4E554C4C /* "NULL" */
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/media/arm/mali-c55-config.h b/include/uapi/linux/media/arm/mali-c55-config.h
index 109082c5694f..3d335f950eeb 100644
--- a/include/uapi/linux/media/arm/mali-c55-config.h
+++ b/include/uapi/linux/media/arm/mali-c55-config.h
@@ -195,15 +195,6 @@ struct mali_c55_stats_buffer {
} __attribute__((packed));
/**
- * enum mali_c55_param_buffer_version - Mali-C55 parameters block versioning
- *
- * @MALI_C55_PARAM_BUFFER_V1: First version of Mali-C55 parameters block
- */
-enum mali_c55_param_buffer_version {
- MALI_C55_PARAM_BUFFER_V1,
-};
-
-/**
* enum mali_c55_param_block_type - Enumeration of Mali-C55 parameter blocks
*
* This enumeration defines the types of Mali-C55 parameters block. Each block
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 5d3f8c9e3a62..d9d86598d100 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -61,7 +61,8 @@
/*
* open_tree() flags.
*/
-#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */
+#define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */
#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
/*
@@ -197,7 +198,10 @@ struct statmount {
*/
struct mnt_id_req {
__u32 size;
- __u32 mnt_ns_fd;
+ union {
+ __u32 mnt_ns_fd;
+ __u32 mnt_fd;
+ };
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
@@ -232,4 +236,9 @@ struct mnt_id_req {
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
+/*
+ * @flag bits for statmount(2)
+ */
+#define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */
+
#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index f356f2ba3814..71c7196d3281 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -49,7 +49,6 @@
NFSERR_NOENT = 2, /* v2 v3 v4 */
NFSERR_IO = 5, /* v2 v3 v4 */
NFSERR_NXIO = 6, /* v2 v3 v4 */
- NFSERR_EAGAIN = 11, /* v2 v3 */
NFSERR_ACCES = 13, /* v2 v3 v4 */
NFSERR_EXIST = 17, /* v2 v3 v4 */
NFSERR_XDEV = 18, /* v3 v4 */
diff --git a/include/uapi/linux/nilfs2_api.h b/include/uapi/linux/nilfs2_api.h
index 8b9b89104f3d..d1b6fcde2fb8 100644
--- a/include/uapi/linux/nilfs2_api.h
+++ b/include/uapi/linux/nilfs2_api.h
@@ -58,7 +58,7 @@ NILFS_CPINFO_FNS(INVALID, invalid)
NILFS_CPINFO_FNS(MINOR, minor)
/**
- * nilfs_suinfo - segment usage information
+ * struct nilfs_suinfo - segment usage information
* @sui_lastmod: timestamp of last modification
* @sui_nblocks: number of written blocks in segment
* @sui_flags: segment usage flags
@@ -93,7 +93,7 @@ static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
}
/**
- * nilfs_suinfo_update - segment usage information update
+ * struct nilfs_suinfo_update - segment usage information update
* @sup_segnum: segment number
* @sup_flags: flags for which fields are active in sup_sui
* @sup_reserved: reserved necessary for alignment
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index 3196cc44a002..b3442b16ff6a 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -133,73 +133,104 @@ struct nilfs_super_root {
/**
* struct nilfs_super_block - structure of super block on disk
+ * @s_rev_level: Revision level
+ * @s_minor_rev_level: minor revision level
+ * @s_magic: Magic signature
+ * @s_bytes: Bytes count of CRC calculation for
+ * this structure. s_reserved is excluded.
+ * @s_flags: flags
+ * @s_crc_seed: Seed value of CRC calculation
+ * @s_sum: Check sum of super block
+ * @s_log_block_size: Block size represented as follows:
+ * blocksize = 1 << (s_log_block_size + 10)
+ * @s_nsegments: Number of segments in filesystem
+ * @s_dev_size: block device size in bytes
+ * @s_first_data_block: 1st seg disk block number
+ * @s_blocks_per_segment: number of blocks per full segment
+ * @s_r_segments_percentage: Reserved segments percentage
+ * @s_last_cno: Last checkpoint number
+ * @s_last_pseg: disk block addr pseg written last
+ * @s_last_seq: seq. number of seg written last
+ * @s_free_blocks_count: Free blocks count
+ * @s_ctime: Creation time (execution time of newfs)
+ * @s_mtime: Mount time
+ * @s_wtime: Write time
+ * @s_mnt_count: Mount count
+ * @s_max_mnt_count: Maximal mount count
+ * @s_state: File system state
+ * @s_errors: Behaviour when detecting errors
+ * @s_lastcheck: time of last check
+ * @s_checkinterval: max. time between checks
+ * @s_creator_os: OS
+ * @s_def_resuid: Default uid for reserved blocks
+ * @s_def_resgid: Default gid for reserved blocks
+ * @s_first_ino: First non-reserved inode
+ * @s_inode_size: Size of an inode
+ * @s_dat_entry_size: Size of a dat entry
+ * @s_checkpoint_size: Size of a checkpoint
+ * @s_segment_usage_size: Size of a segment usage
+ * @s_uuid: 128-bit uuid for volume
+ * @s_volume_name: volume name
+ * @s_c_interval: Commit interval of segment
+ * @s_c_block_max: Threshold of data amount for the
+ * segment construction
+ * @s_feature_compat: Compatible feature set
+ * @s_feature_compat_ro: Read-only compatible feature set
+ * @s_feature_incompat: Incompatible feature set
+ * @s_reserved: padding to the end of the block
*/
struct nilfs_super_block {
-/*00*/ __le32 s_rev_level; /* Revision level */
- __le16 s_minor_rev_level; /* minor revision level */
- __le16 s_magic; /* Magic signature */
-
- __le16 s_bytes; /*
- * Bytes count of CRC calculation
- * for this structure. s_reserved
- * is excluded.
- */
- __le16 s_flags; /* flags */
- __le32 s_crc_seed; /* Seed value of CRC calculation */
-/*10*/ __le32 s_sum; /* Check sum of super block */
-
- __le32 s_log_block_size; /*
- * Block size represented as follows
- * blocksize =
- * 1 << (s_log_block_size + 10)
- */
- __le64 s_nsegments; /* Number of segments in filesystem */
-/*20*/ __le64 s_dev_size; /* block device size in bytes */
- __le64 s_first_data_block; /* 1st seg disk block number */
-/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */
- __le32 s_r_segments_percentage; /* Reserved segments percentage */
-
- __le64 s_last_cno; /* Last checkpoint number */
-/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */
- __le64 s_last_seq; /* seq. number of seg written last */
-/*50*/ __le64 s_free_blocks_count; /* Free blocks count */
-
- __le64 s_ctime; /*
- * Creation time (execution time of
- * newfs)
- */
-/*60*/ __le64 s_mtime; /* Mount time */
- __le64 s_wtime; /* Write time */
-/*70*/ __le16 s_mnt_count; /* Mount count */
- __le16 s_max_mnt_count; /* Maximal mount count */
- __le16 s_state; /* File system state */
- __le16 s_errors; /* Behaviour when detecting errors */
- __le64 s_lastcheck; /* time of last check */
-
-/*80*/ __le32 s_checkinterval; /* max. time between checks */
- __le32 s_creator_os; /* OS */
- __le16 s_def_resuid; /* Default uid for reserved blocks */
- __le16 s_def_resgid; /* Default gid for reserved blocks */
- __le32 s_first_ino; /* First non-reserved inode */
-
-/*90*/ __le16 s_inode_size; /* Size of an inode */
- __le16 s_dat_entry_size; /* Size of a dat entry */
- __le16 s_checkpoint_size; /* Size of a checkpoint */
- __le16 s_segment_usage_size; /* Size of a segment usage */
-
-/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
-/*A8*/ char s_volume_name[80] /* volume name */
- __kernel_nonstring;
-
-/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
- __le32 s_c_block_max; /*
- * Threshold of data amount for
- * the segment construction
- */
-/*100*/ __le64 s_feature_compat; /* Compatible feature set */
- __le64 s_feature_compat_ro; /* Read-only compatible feature set */
- __le64 s_feature_incompat; /* Incompatible feature set */
- __u32 s_reserved[186]; /* padding to the end of the block */
+/*00*/ __le32 s_rev_level;
+ __le16 s_minor_rev_level;
+ __le16 s_magic;
+
+ __le16 s_bytes;
+ __le16 s_flags;
+ __le32 s_crc_seed;
+/*10*/ __le32 s_sum;
+
+ __le32 s_log_block_size;
+ __le64 s_nsegments;
+/*20*/ __le64 s_dev_size;
+ __le64 s_first_data_block;
+/*30*/ __le32 s_blocks_per_segment;
+ __le32 s_r_segments_percentage;
+
+ __le64 s_last_cno;
+/*40*/ __le64 s_last_pseg;
+ __le64 s_last_seq;
+/*50*/ __le64 s_free_blocks_count;
+
+ __le64 s_ctime;
+/*60*/ __le64 s_mtime;
+ __le64 s_wtime;
+/*70*/ __le16 s_mnt_count;
+ __le16 s_max_mnt_count;
+ __le16 s_state;
+ __le16 s_errors;
+ __le64 s_lastcheck;
+
+/*80*/ __le32 s_checkinterval;
+ __le32 s_creator_os;
+ __le16 s_def_resuid;
+ __le16 s_def_resgid;
+ __le32 s_first_ino;
+
+/*90*/ __le16 s_inode_size;
+ __le16 s_dat_entry_size;
+ __le16 s_checkpoint_size;
+ __le16 s_segment_usage_size;
+
+/*98*/ __u8 s_uuid[16];
+/*A8*/ char s_volume_name[80] __kernel_nonstring;
+
+/*F8*/ __le32 s_c_interval;
+ __le32 s_c_block_max;
+
+/*100*/ __le64 s_feature_compat;
+ __le64 s_feature_compat_ro;
+ __le64 s_feature_incompat;
+ __u32 s_reserved[186];
};
/*
@@ -449,7 +480,7 @@ struct nilfs_btree_node {
/**
* struct nilfs_direct_node - header of built-in bmap array
* @dn_flags: flags
- * @dn_pad: padding
+ * @pad: padding
*/
struct nilfs_direct_node {
__u8 dn_flags;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 8134f10e4e6c..8433bac48112 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2880,8 +2880,9 @@ enum nl80211_commands {
* index. If the userspace includes more RNR elements than number of
* MBSSID elements then these will be added in every EMA beacon.
*
- * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
- * disabled.
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Unused. It was used to indicate that a link
+ * is disabled during association. However, the AP will send the
+ * information by including a TTLM in the association response.
*
* @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
* include BSSes that can only be used in restricted scenarios and/or
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index c44a8fb3e418..fd10aa8d697f 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -2,7 +2,7 @@
/*
* Performance events:
*
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
* Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
@@ -1330,14 +1330,16 @@ union perf_mem_data_src {
mem_snoopx : 2, /* Snoop mode, ext */
mem_blk : 3, /* Access blocked */
mem_hops : 3, /* Hop level */
- mem_rsvd : 18;
+ mem_region : 5, /* cache/memory regions */
+ mem_rsvd : 13;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd : 18,
+ __u64 mem_rsvd : 13,
+ mem_region : 5, /* cache/memory regions */
mem_hops : 3, /* Hop level */
mem_blk : 3, /* Access blocked */
mem_snoopx : 2, /* Snoop mode, ext */
@@ -1394,7 +1396,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
-/* 0x007 available */
+#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
@@ -1447,6 +1449,25 @@ union perf_mem_data_src {
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
+/* Cache/Memory region */
+#define PERF_MEM_REGION_NA 0x0 /* Invalid */
+#define PERF_MEM_REGION_RSVD 0x01 /* Reserved */
+#define PERF_MEM_REGION_L_SHARE 0x02 /* Local CA shared cache */
+#define PERF_MEM_REGION_L_NON_SHARE 0x03 /* Local CA non-shared cache */
+#define PERF_MEM_REGION_O_IO 0x04 /* Other CA IO agent */
+#define PERF_MEM_REGION_O_SHARE 0x05 /* Other CA shared cache */
+#define PERF_MEM_REGION_O_NON_SHARE 0x06 /* Other CA non-shared cache */
+#define PERF_MEM_REGION_MMIO 0x07 /* MMIO */
+#define PERF_MEM_REGION_MEM0 0x08 /* Memory region 0 */
+#define PERF_MEM_REGION_MEM1 0x09 /* Memory region 1 */
+#define PERF_MEM_REGION_MEM2 0x0a /* Memory region 2 */
+#define PERF_MEM_REGION_MEM3 0x0b /* Memory region 3 */
+#define PERF_MEM_REGION_MEM4 0x0c /* Memory region 4 */
+#define PERF_MEM_REGION_MEM5 0x0d /* Memory region 5 */
+#define PERF_MEM_REGION_MEM6 0x0e /* Memory region 6 */
+#define PERF_MEM_REGION_MEM7 0x0f /* Memory region 7 */
+#define PERF_MEM_REGION_SHIFT 46
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
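A small decoding sketch for the new field (assumes a PERF_SAMPLE_DATA_SRC value from a perf sample is already in hand):

#include <linux/perf_event.h>

/* Extract the new 5-bit cache/memory region field from a data_src value and
 * compare it against the PERF_MEM_REGION_* encodings above. */
static unsigned int mem_region(__u64 data_src)
{
        return (data_src >> PERF_MEM_REGION_SHIFT) & 0x1f;
}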
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 51c4e8c82b1e..79944b7ae50a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -386,4 +386,14 @@ struct prctl_mm_map {
# define PR_FUTEX_HASH_SET_SLOTS 1
# define PR_FUTEX_HASH_GET_SLOTS 2
+/* RSEQ time slice extensions */
+#define PR_RSEQ_SLICE_EXTENSION 79
+# define PR_RSEQ_SLICE_EXTENSION_GET 1
+# define PR_RSEQ_SLICE_EXTENSION_SET 2
+/*
+ * Bits for PR_RSEQ_SLICE_EXTENSION_GET/SET
+ * PR_RSEQ_SLICE_EXT_ENABLE: Enable
+ */
+# define PR_RSEQ_SLICE_EXT_ENABLE 0x01
+
#endif /* _LINUX_PRCTL_H */
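A hedged enable/query sketch using the new prctl (illustrative only; the argument layout, operation in arg2 and bit mask in arg3, is an assumption, and rseq is assumed to be registered for the thread):

#include <sys/prctl.h>
#include <linux/prctl.h>

static int enable_slice_extension(void)
{
        /* Opt the thread in to time slice extensions ... */
        if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
                  PR_RSEQ_SLICE_EXT_ENABLE, 0, 0))
                return -1;

        /* ... and read the current state back. */
        return prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_GET,
                     0, 0, 0);
}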
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 1b76d508400c..863c4a00a66b 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -19,13 +19,20 @@ enum rseq_cpu_id_state {
};
enum rseq_flags {
- RSEQ_FLAG_UNREGISTER = (1 << 0),
+ RSEQ_FLAG_UNREGISTER = (1 << 0),
+ RSEQ_FLAG_SLICE_EXT_DEFAULT_ON = (1 << 1),
};
enum rseq_cs_flags_bit {
+ /* Historical and unsupported bits */
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
+ /* (3) Intentional gap to put new bits into a separate byte */
+
+ /* User read only feature flags */
+ RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT = 4,
+ RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT = 5,
};
enum rseq_cs_flags {
@@ -35,6 +42,11 @@ enum rseq_cs_flags {
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
+
+ RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE =
+ (1U << RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT),
+ RSEQ_CS_FLAG_SLICE_EXT_ENABLED =
+ (1U << RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT),
};
/*
@@ -53,6 +65,27 @@ struct rseq_cs {
__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
+/**
+ * struct rseq_slice_ctrl - Time slice extension control structure
+ * @all: Compound value
+ * @request: Request for a time slice extension
+ * @granted: Granted time slice extension
+ *
+ * @request is set by user space and can be cleared by user space or kernel
+ * space. @granted is set and cleared by the kernel and must only be read
+ * by user space.
+ */
+struct rseq_slice_ctrl {
+ union {
+ __u32 all;
+ struct {
+ __u8 request;
+ __u8 granted;
+ __u16 __reserved;
+ };
+ };
+};
+
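A userspace-side sketch of the request/grant handshake described above (heavily hedged: the precise protocol is defined by the rseq documentation in this series, not by this header alone; 'rs' stands in for the thread's registered struct rseq):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/rseq.h>

/* Sketch: ask for a slice extension around a short critical section. */
static inline void critical_begin(struct rseq *rs)
{
        /* Tell the kernel we would like to finish before being preempted. */
        rs->slice_ctrl.request = 1;
}

static inline void critical_end(struct rseq *rs)
{
        rs->slice_ctrl.request = 0;
        /* If the kernel granted an extension, give the borrowed time back;
         * __NR_rseq_slice_yield is the syscall added in this series. */
        if (rs->slice_ctrl.granted)
                syscall(__NR_rseq_slice_yield);
}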
/*
* struct rseq is aligned on 4 * 8 bytes to ensure it is always
* contained within a single cache-line.
@@ -142,6 +175,12 @@ struct rseq {
__u32 mm_cid;
/*
+ * Time slice extension control structure. CPU local updates from
+ * kernel and user space.
+ */
+ struct rseq_slice_ctrl slice_ctrl;
+
+ /*
* Flexible array member at end of structure, after last feature field.
*/
char end[];
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 9a28f7d9a334..111b097ec00b 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -72,6 +72,10 @@
#define __counted_by_be(m)
#endif
+#ifndef __counted_by_ptr
+#define __counted_by_ptr(m)
+#endif
+
#ifdef __KERNEL__
#define __kernel_nonstring __nonstring
#else
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 63d1464cb71c..1c7fe0f4dca4 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -92,7 +92,6 @@ enum
KERN_DOMAINNAME=8, /* string: domainname */
KERN_PANIC=15, /* int: panic timeout */
- KERN_REALROOTDEV=16, /* real root device to mount after initrd */
KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index ec77dabba45b..a88876756805 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -55,7 +55,8 @@
_IOWR('u', 0x15, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_QUIESCE_DEV \
_IOWR('u', 0x16, struct ublksrv_ctrl_cmd)
-
+#define UBLK_U_CMD_TRY_STOP_DEV \
+ _IOWR('u', 0x17, struct ublksrv_ctrl_cmd)
/*
* 64bits are enough now, and it should be easy to extend in case of
* running out of feature flags
@@ -103,6 +104,30 @@
#define UBLK_U_IO_UNREGISTER_IO_BUF \
_IOWR('u', 0x24, struct ublksrv_io_cmd)
+/*
+ * Return 0 if the command runs successfully, otherwise a failure code
+ * is returned.
+ */
+#define UBLK_U_IO_PREP_IO_CMDS \
+ _IOWR('u', 0x25, struct ublk_batch_io)
+/*
+ * If a failure code is returned, nothing in the command buffer is handled.
+ * Otherwise, the returned value is the number of bytes in the command buffer
+ * that were actually handled; the number of handled IOs can be derived from
+ * it using `elem_bytes` per IO. IOs in the remaining bytes are not committed,
+ * so userspace has to check the return value to deal with partial commits
+ * correctly.
+ */
+#define UBLK_U_IO_COMMIT_IO_CMDS \
+ _IOWR('u', 0x26, struct ublk_batch_io)
+
+/*
+ * Fetch io commands into the provided buffer in multishot style;
+ * `IORING_URING_CMD_MULTISHOT` is required for this command.
+ */
+#define UBLK_U_IO_FETCH_IO_CMDS \
+ _IOWR('u', 0x27, struct ublk_batch_io)
+
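A sketch of the partial-commit bookkeeping the comment above requires (ret is the uring_cmd result, nr_elem and elem_bytes match struct ublk_batch_io below, and requeue_commit() is a hypothetical helper):

/* ret < 0: nothing in the command buffer was handled.
 * ret >= 0: ret bytes were consumed; convert to a count of IOs and
 * re-submit the rest in a later UBLK_U_IO_COMMIT_IO_CMDS.
 */
if (ret >= 0) {
        unsigned int done = ret / elem_bytes;
        unsigned int left = nr_elem - done;

        if (left)
                requeue_commit(buf + (size_t)done * elem_bytes, left);
}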
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
#define UBLK_IO_RES_NEED_GET_DATA 1
@@ -134,6 +159,10 @@
#define UBLKSRV_IO_BUF_TOTAL_BITS (UBLK_QID_OFF + UBLK_QID_BITS)
#define UBLKSRV_IO_BUF_TOTAL_SIZE (1ULL << UBLKSRV_IO_BUF_TOTAL_BITS)
+/* Copy to/from request integrity buffer instead of data buffer */
+#define UBLK_INTEGRITY_FLAG_OFF 62
+#define UBLKSRV_IO_INTEGRITY_FLAG (1ULL << UBLK_INTEGRITY_FLAG_OFF)
+
/*
* ublk server can register data buffers for incoming I/O requests with a sparse
* io_uring buffer table. The request buffer can then be used as the data buffer
@@ -311,6 +340,36 @@
*/
#define UBLK_F_BUF_REG_OFF_DAEMON (1ULL << 14)
+/*
+ * Support the following commands for delivering & committing io commands
+ * in batches.
+ *
+ * - UBLK_U_IO_PREP_IO_CMDS
+ * - UBLK_U_IO_COMMIT_IO_CMDS
+ * - UBLK_U_IO_FETCH_IO_CMDS
+ * - UBLK_U_IO_REGISTER_IO_BUF
+ * - UBLK_U_IO_UNREGISTER_IO_BUF
+ *
+ * The existing UBLK_U_IO_FETCH_REQ, UBLK_U_IO_COMMIT_AND_FETCH_REQ and
+ * UBLK_U_IO_NEED_GET_DATA uring_cmds are not supported when this feature is used.
+ */
+#define UBLK_F_BATCH_IO (1ULL << 15)
+
+/*
+ * ublk device supports requests with integrity/metadata buffer.
+ * Requires UBLK_F_USER_COPY.
+ */
+#define UBLK_F_INTEGRITY (1ULL << 16)
+
+/*
+ * The device supports the UBLK_CMD_TRY_STOP_DEV command, which
+ * allows stopping the device only if there are no openers.
+ */
+#define UBLK_F_SAFE_STOP_DEV (1ULL << 17)
+
+/* Disable automatic partition scanning when device is started */
+#define UBLK_F_NO_AUTO_PART_SCAN (1ULL << 18)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -408,6 +467,8 @@ struct ublksrv_ctrl_dev_info {
* passed in.
*/
#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
+/* Request has an integrity data buffer */
+#define UBLK_IO_F_INTEGRITY (1UL << 18)
/*
* io cmd is described by this structure, and stored in share memory, indexed
@@ -525,6 +586,51 @@ struct ublksrv_io_cmd {
};
};
+struct ublk_elem_header {
+ __u16 tag; /* IO tag */
+
+ /*
+ * Buffer index for incoming io command, only valid iff
+ * UBLK_F_AUTO_BUF_REG is set
+ */
+ __u16 buf_index;
+ __s32 result; /* I/O completion result (commit only) */
+};
+
+/*
+ * uring_cmd buffer structure for batch commands
+ *
+ * The buffer includes multiple elements, whose number is specified by
+ * `nr_elem`. Each element buffer is organized in the following order:
+ *
+ * struct ublk_elem_buffer {
+ * // Mandatory fields (8 bytes)
+ * struct ublk_elem_header header;
+ *
+ * // Optional fields (8 bytes each, included based on flags)
+ *
+ * // Buffer address (if UBLK_BATCH_F_HAS_BUF_ADDR) for copying data
+ * // between ublk request and ublk server buffer
+ * __u64 buf_addr;
+ *
+ * // returned Zone append LBA (if UBLK_BATCH_F_HAS_ZONE_LBA)
+ * __u64 zone_lba;
+ * }
+ *
+ * Used for `UBLK_U_IO_PREP_IO_CMDS` and `UBLK_U_IO_COMMIT_IO_CMDS`
+ */
+struct ublk_batch_io {
+ __u16 q_id;
+#define UBLK_BATCH_F_HAS_ZONE_LBA (1 << 0)
+#define UBLK_BATCH_F_HAS_BUF_ADDR (1 << 1)
+#define UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK (1 << 2)
+ __u16 flags;
+ __u16 nr_elem;
+ __u8 elem_bytes;
+ __u8 reserved;
+ __u64 reserved2;
+};
+
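A sizing sketch matching the element layout documented above (each optional field adds 8 bytes when its flag is set):

/* Sketch: compute elem_bytes for a given set of UBLK_BATCH_F_* flags. */
static __u8 ublk_elem_bytes(__u16 flags)
{
        __u8 bytes = sizeof(struct ublk_elem_header); /* mandatory 8 bytes */

        if (flags & UBLK_BATCH_F_HAS_BUF_ADDR)
                bytes += sizeof(__u64); /* buf_addr */
        if (flags & UBLK_BATCH_F_HAS_ZONE_LBA)
                bytes += sizeof(__u64); /* zone_lba */
        return bytes;
}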
struct ublk_param_basic {
#define UBLK_ATTR_READ_ONLY (1 << 0)
#define UBLK_ATTR_ROTATIONAL (1 << 1)
@@ -600,6 +706,17 @@ struct ublk_param_segment {
__u8 pad[2];
};
+struct ublk_param_integrity {
+ __u32 flags; /* LBMD_PI_CAP_* from linux/fs.h */
+ __u16 max_integrity_segments; /* 0 means no limit */
+ __u8 interval_exp;
+ __u8 metadata_size; /* UBLK_PARAM_TYPE_INTEGRITY requires nonzero */
+ __u8 pi_offset;
+ __u8 csum_type; /* LBMD_PI_CSUM_* from linux/fs.h */
+ __u8 tag_size;
+ __u8 pad[5];
+};
+
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -614,6 +731,7 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_ZONED (1 << 3)
#define UBLK_PARAM_TYPE_DMA_ALIGN (1 << 4)
#define UBLK_PARAM_TYPE_SEGMENT (1 << 5)
+#define UBLK_PARAM_TYPE_INTEGRITY (1 << 6) /* requires UBLK_F_INTEGRITY */
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
@@ -622,6 +740,7 @@ struct ublk_params {
struct ublk_param_zoned zoned;
struct ublk_param_dma_align dma;
struct ublk_param_segment seg;
+ struct ublk_param_integrity integrity;
};
#endif
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index c7c85bb504ba..2e5aef48fa7e 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -23,7 +23,7 @@
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
struct xattr_args {
- __aligned_u64 __user value;
+ __aligned_u64 value;
__u32 size;
__u32 flags;
};
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
index 9ac161866653..16a0a0556b86 100644
--- a/include/vdso/gettime.h
+++ b/include/vdso/gettime.h
@@ -20,5 +20,6 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_getres_time64(clockid_t clock, struct __kernel_timespec *ts);
#endif
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
index ff0c06b6513e..9076483c9fbb 100644
--- a/include/vdso/unaligned.h
+++ b/include/vdso/unaligned.h
@@ -2,14 +2,43 @@
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
- __get_pptr->x; \
+#include <linux/compiler_types.h>
+
+/**
+ * __get_unaligned_t - read an unaligned value from memory.
+ * @type: the type to load from the pointer.
+ * @ptr: the pointer to load from.
+ *
+ * Use memcpy to effect an unaligned, type-sized load, avoiding undefined behavior
+ * from approaches like type punning that require -fno-strict-aliasing in order
+ * to be correct. As type may be const, use __unqual_scalar_typeof to map to a
+ * non-const type - you can't memcpy into a const type. The
+ * __get_unaligned_ctrl_type gives __unqual_scalar_typeof its required
+ * expression rather than a type; a pointer is used to avoid warnings about mixing
+ * the use of 0 and NULL. The void* cast silences ubsan warnings.
+ */
+#define __get_unaligned_t(type, ptr) ({ \
+ type *__get_unaligned_ctrl_type __always_unused = NULL; \
+ __unqual_scalar_typeof(*__get_unaligned_ctrl_type) __get_unaligned_val; \
+ __builtin_memcpy(&__get_unaligned_val, (void *)(ptr), \
+ sizeof(__get_unaligned_val)); \
+ __get_unaligned_val; \
})
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr); \
- __put_pptr->x = (val); \
+/**
+ * __put_unaligned_t - write an unaligned value to memory.
+ * @type: the type of the value to store.
+ * @val: the value to store.
+ * @ptr: the pointer to store to.
+ *
+ * Use memcpy to effect an unaligned, type-sized store, avoiding undefined
+ * behavior from approaches like type punning that require -fno-strict-aliasing
+ * in order to be correct. The void* cast silences ubsan warnings.
+ */
+#define __put_unaligned_t(type, val, ptr) do { \
+ type __put_unaligned_val = (val); \
+ __builtin_memcpy((void *)(ptr), &__put_unaligned_val, \
+ sizeof(__put_unaligned_val)); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
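A trivial usage sketch of the rewritten helpers (the behavioural contract is unchanged by the memcpy-based implementation; kernel-internal context is assumed):

#include <linux/types.h>
#include <vdso/unaligned.h>

/* Read a 32-bit value from an arbitrarily aligned offset in a byte buffer,
 * then write it back one byte further on; neither access relies on UB. */
static void shift_u32(unsigned char *buf)
{
        __u32 v = __get_unaligned_t(__u32, buf + 1);

        __put_unaligned_t(__u32, v, buf + 2);
}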
diff --git a/include/video/edid.h b/include/video/edid.h
index c2b186b1933a..52aabb706032 100644
--- a/include/video/edid.h
+++ b/include/video/edid.h
@@ -4,8 +4,4 @@
#include <uapi/video/edid.h>
-#if defined(CONFIG_FIRMWARE_EDID)
-extern struct edid_info edid_info;
-#endif
-
#endif /* __linux_video_edid_h__ */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 61854e3f2837..f280c5dcf923 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -69,11 +69,13 @@ extern u64 xen_saved_max_mem_size;
#endif
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+extern unsigned long xen_unpopulated_pages;
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
#include <linux/ioport.h>
int arch_xen_unpopulated_init(struct resource **res);
#else
+#define xen_unpopulated_pages 0UL
#include <xen/balloon.h>
static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
struct page **pages)